/*
* DMG cracker patch for JtR. Hacked together during August of 2012
* by Dhiru Kholia <dhiru.kholia at gmail.com>
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* Copyright (c) 2015, magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
/*
* Debug levels:
* 1 show what "test" hits
* 2 dump printables from the decrypted blocks
* 3 dump hex from the decrypted blocks
* 4 dump decrypted blocks to files (will overwrite with no mercy):
* dmg.debug.main main block
* dmg.debug alternate block (if present, this is the start block)
*/
//#define DMG_DEBUG 2
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_dmg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_dmg);
#else
#include <string.h>
#include <openssl/des.h>
#include "aes.h"
#include "hmac_sha.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef DMG_DEBUG
#define NEED_OS_FLOCK
#include "os.h"
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
/* Format identification and self-test parameters (see formats.h for field semantics) */
#define FORMAT_LABEL "dmg-opencl"
#define FORMAT_NAME "Apple DMG"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES/AES"
#define BENCHMARK_COMMENT ""
/* NOTE(review): negative value selects a special benchmark mode -- confirm meaning against formats.h */
#define BENCHMARK_LENGTH -1001
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* No stored binary: cracking is verified by trial decryption of the chunk data */
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
/*
 * Unconditional 32-bit byte swap (NOT conditional on host endianness like the
 * system htonl).  Masks before shifting so it is safe when unsigned long is
 * 64-bit.  NOTE(review): callers presumably rely on it always swapping,
 * regardless of host byte order -- verify at the use sites.
 */
#undef HTONL
#define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \
((((unsigned long)(n) & 0xFF00)) << 8) | \
((((unsigned long)(n) & 0xFF0000)) >> 8) | \
((((unsigned long)(n) & 0xFF000000)) >> 24))
#ifdef DMG_DEBUG
/* Defined by the JtR core; true while benchmarking (debug dumps are suppressed then) */
extern volatile int bench_running;
#endif
/*
 * Candidate password as transferred to the OpenCL kernel.
 * Layout is shared with the device-side kernel code -- do not change.
 */
typedef struct {
uint32_t length;      /* number of valid bytes in v[] (not NUL-terminated) */
uint8_t v[PLAINTEXT_LENGTH];
} dmg_password;
/*
 * 32-byte result read back from the kernel, viewed as eight 32-bit words.
 * NOTE(review): presumably the PBKDF2-SHA1 derived key -- confirm against the
 * kernel source.  Layout is shared with the device side -- do not change.
 */
typedef struct {
uint32_t v[32/4];
} dmg_hash;
/*
 * Salt/settings blob transferred to the OpenCL kernel.
 * Layout is shared with the device side -- do not change.
 */
typedef struct {
int iterations;       /* PBKDF2 iteration count */
int outlen;           /* requested derived-key length in bytes */
uint8_t length;       /* number of valid bytes in salt[] */
uint8_t salt[20];
} dmg_salt;
/* Per-candidate crack flags for the current batch, plus a fast any-hit flag */
static int *cracked;
static int any_cracked;
/*
 * Host-side parsed DMG header material for the current salt.
 * NOTE(review): field meanings below are inferred from names and the fields
 * already commented by the author; confirm against the ciphertext parser.
 */
static struct custom_salt {
unsigned int saltlen;
unsigned char salt[20];
unsigned int ivlen;
unsigned char iv[32];
int headerver;                          /* DMG header version (v1 or v2 paths) */
unsigned char chunk[8192];              /* encrypted data chunk used for verification */
uint32_t encrypted_keyblob_size;
uint8_t encrypted_keyblob[128];
unsigned int len_wrapped_aes_key;
unsigned char wrapped_aes_key[296];
unsigned int len_hmac_sha1_key;
unsigned char wrapped_hmac_sha1_key[300];
char scp; /* start chunk present */
unsigned char zchunk[4096]; /* chunk #0 */
int cno;                                /* chunk number of chunk[] */
int data_size;
unsigned int iterations;                /* PBKDF2 iteration count from the header */
} *cur_salt;
/* Host/device transfer state for the OpenCL path */
static cl_int cl_error;                 /* last OpenCL status code */
static dmg_password *inbuffer;          /* host staging: candidate passwords */
static dmg_hash *outbuffer;             /* host staging: derived keys from device */
static dmg_salt currentsalt;            /* salt/settings copied to mem_setting */
static cl_mem mem_in, mem_out, mem_setting;  /* device buffers */
static struct fmt_main *self;
/*
 * Buffer byte sizes for the three transfers above plus the cracked[] array.
 * Made static for internal linkage, consistent with every other file-scope
 * object here -- NOTE(review): confirm no other translation unit declares
 * these extern (none should; plug files keep their state private).
 */
static size_t insize, outsize, settingsize, cracked_size;
static struct fmt_tests dmg_tests[] = {
// testimage.AES-256.64k.header_v2.dmg
{"$dmg$2*20*fd70ac1e078f01fce55a2e56145a2494446db32a*32*9110b1778f09b1a7000000000000000000000000000000000000000000000000*64*68a32866b0e67515f35dc67c4d6747a8561a9f4f6a6718a894b0a77a47c452471e04ecef9bf56f0d83d1201a509a374e00000000000000000000000000000000*14*8192*70ebe6f1d387e33e3d1093cca2e94c9a32e2c9ba47d461d737d49a7dc1b1f69407b7dbc16f7671689ea4a4641652b3f976b6f1c73c551a0a407d5a335caa169db4a6a25bbd27fbbc38fc71b29ee9b1eae349b0d8a21d57959ecca6bf74bc26ccaee69cfee4999b55374605491af6d0b9066c26995209cd1b71925bcb45a8ef5727a6c20338f08de4357d4cb42cb65ecdc2344a5d7387633c913258ba40699ea5f88804b5e562bf973096337b17b4fc1236d3c8a80b9b48aed63c5a0eae3ae924a883e948f374771bba46923658f225fd2795ce0e795269f589e0ffc81615585e1224cddde654d689a3260e69683c6198bdfcd87507c23cefe36d72f8878cb27bbe5dce868752a7cce067f5a3110f20ebd31ecd53840103e0b2d44385656398edc487bf6d1a5ec3a56af54f9d4254fd20988df41eb85e366f13da1270a3f42c6672ad5faf00fa21e9ba3691bde78ab2c267a142f275467d5b853a107dbf1d75839f0e87b3b4f1d2cec88cc02a26bc4a63aa6836b0c43c5dbb44a832050385a48d46968361ebb053c2416c02458b76c95e50970922556d40b100967340a32824e6b6e44c0c1e0da7ce989d9d5ad91560156"
"ed39666cbfbea71f28797a5a7a40e77665612e977ecb8b7fe71d500eafc29d9a0ec1d0ff1723fea7c405bc181ea93c0df42f5bf886eace3cfeee8b0dba52ba8cd2ae009e75d8845264d12dd632ca3236bc1b643437881b270183d2e2bd20808ae73d32bfe88347e33bef4921fcfac9646b74f116be1f04fc353d2222499d5247fa842d0d0f00fc9642ea7524adb65c18fff87b6efd060ec850d7de6f59869387b3d4cc8e38014d52d94ead07d16b8d94327fe5533941497c9be2dd6c04142ba57e29daaeef96d0f2d109522651d797715f4bc5f4cc3fb69fa92623b5ea3e08ff78dc59913993c877f4e2c8964dffd2c8cde6c6b6738da2883505486df5b633aaa8c66acbc2886107f3dd61b1df29f54a13ef27a7d2785c02153375240885e5c54297d88827403320799e05213761549eedc1c159c922087983410d2abadf9ef8ae460d018c278a9ea724f52b866e3d7ff2374496103b5137297100c970d195fca8c1286a8f9d3859ee12c84bdaa4b56ca91e307580b61dbe435ce4021007e4a2a8085976549cf1d195f439bb6e642567f91a0224e98796614d9ea6bfab8f6d13f91b7a80a54e538a1a785cd07b5d7ed2b7e45a0658b5722b5f8844f5139cff3b33ce244946757c020c54c8b5e43324023ed11001201213ffe4829e37135686a8bec1837b35fb234049570868dc5ba9c84cef6890d9ec400a794b1723eb209a60758ba9ae9abd23a7ea9f94fc6b73d29a560e24973c9160f195fbe82376c81dfeec1a7f912a8c22c067a26786a22f0b7db298"
"3631400f120010706c78acc36ddcc29c7055fe82105f770e2dadf131ab49af93539fb5186d32dbe4a4df6cb0fdf6840c0609c8769fe242cc60d87e04e6e3be1a7884a05d9fb96c3bc1bbc769d96bbcc0413492eefc5502e9c1ac7c3f237b9851dc453b5bfa899b7b68e5e3b92711e7c92945feb6f6e452d6216e154a952cc28a3740925554d9fd44acedc8a44b0c25bbb6aa637fe9560437c08b17992c74de38fe1fb8fd5f66c2933c2d573ddc914f68f42d6cb350f126a51f607a2dd23b63e6382ec1e6ae434f47cfcd1e7d96c8293ef2994f850a27ef2d8210a0df0c219eadd2376ce36a22db56827d92a90d5e2fa55a4154c39061bd5490ba29f8309cf3e2056f761762dff56803bbe0607faef510d023b249663368977fede0577944f2ff05ead4b432bbb07a7d90148ebd1e30bf1204cd9069725d9fdbb850d3d6fde5044da1b9ffa222d99061c8ae217bc5b249960db545e6fece3ea2faeefa7702f065764b326ae0e62f3b8745cb73f35bea1bb9f6ed4fcda591f4d84da0415a0552306f6691a64a1d0efc8ac93559a79e57e357b63df48506c12dde74f6ea8fc5eeb1846c394fb8fd0fd40df26a42e53692db51bb36403305c1aff797e20adb6f8f1721e316705dcf8fe6e6989a5c3da253fdc6cb5de426f1c018161d72e34e6791d73023c5df69c0f83d3ea1d097f3a7ff37720a66868f40d3b87755bdaf508086c7e478ac1efc0dc421987af6db9b2f096a7270de91f5b3b84ee6d1d268d581718d3c534eeffbe2889388e9930cb051b5752c1a"
"b1faf1e367866af7d4b37ba25c15a030d9a5f32bb8912ce853fe7988dc62aa61264e3c5a29d18c5121a605558b15004c817cb0ab1646138cbf6375f1a179852bc22d80b83891edfd38e25efcc0dbb78062f479a9dc792e5822e09ba3e0b8ef71c62ad7747dba8cc97707f31383baa93108d5c7253dce2395fa24d77c42cbf3559b5dc0235c0ce49ef9e3cc816598698c8f8c5b32abfaeb44f3c35a01a4f47421a166d5aa893aaba80e57eb576b838c95ed6f9d5b3d389a8f86b97fe629408ec7c7ba7fd95d7625e950c7324fdd35989570b24f2e1e24d52b65ed6116e728dc3a1004d3d8fbfeeaea1c7dc5d3dc7a029f97f8dc7f740e2386eb27e9793680d959821031fda08c7146f46e8ee47ec28c7d25574eb690de09849725e490c39e524b74aecfc68ff0d760d115b4d0a126609cef83b6c80731dd17f4a307331464953c6b41875b6e5fea328fd59f275e2fabd25717781cf9d5cc52286246ebc92527eeac7acc6e2652c6fcff405e7b4a78b8f9475f46bb82a68a6e44037d61de0df58a8b7a81f407aaa260f3a49c4a2641776404fc15bfb77573dc8728573a1872e7e093663842d9368e74cbe3ae547355fa101daeaa0f97dc0a63927e54ae59fe13aac4f488e938fa67a12876d103b4a56b6eb88ff0104330e5cdc7c6886b46545d523bfbfc88f40f9654fcd0f8c4f443a225b50b44af9674166d3de36b6ac63a150fbcda2e2511ae2a42fbe51c08f7238366aada5c6be8eeb41963c6a5374a94b332012e860d6cfbc1b8a4d5a9825b88a90c9a5f"
"5615ca503698ad00df2cd93467b66d9b15876bc49895a081959132bad2e63757aa4e5ff77c6f25dd2581a3e9bb8e213c9313ceca0fcf5f8416882849fbee576d8ffb9dc057eb96bf6b81db60a82b0e6f315a13dd31706c0e36f4f21b9ce977ff6700cd77db603120d59ad8088e121cc3c502e37774b098eee7c8244f9bbe0d4a9d0deba3ec22e5abfea69ab72cdb75a001bb53672fe12b4fdbdf7e82c0bb2608de5d8e1961fb4524dd1acc890361923fb691bc5ea436246428a70b5021f9eee2c637eeab574babde4c0d55f57925e511ff623af5c4224d3ccb9c8572179e2610b4b79817ca18ddcb5302151f9facffca96269ff5fbb11e48209e20145bdd70d72bae54f6fbb89a3396bdaaa3d45413e3c5bc672ab98dfbeb3274156096f641494c1c946baab7c388a16c71ce5009b32f45dbbe37998906570045027950bd758b7ab2f72c243eccf9551d539946a99779848b16cddf9f163fcefe1e1ebee3ba7d5240b92698ad56a036274ca798eae19b0dbcf39a1c0ea1a58b29dc0e3de89def08e6c5800c94db47b7eaef5514c002d687b4d99b00fbd44137f56557830d63156f43bf73db8b330bca0ebb4ea5d50941b758929722aaa5452cd4a4e00640165dfc35fd35daaf929997adeb4c4f7611d66befb80809dc7bc6c763879c3bcd8dd0fe6b621898717fd095fb7eb403b07591b931a8e16ab488b01acd636bf4f1e71d5460532b8a3b00d7353e84c071de5cfa25de685cb85b569e08d2f177727cda11f196b040d25c97ccb83e355db98c2bc14844"
"1ca95b5f612020bc53a81184ccd0c5f14bf6d9fd6318ec28bafe8d668cb3c98c56ad416007bef4a3ed9e12eafe8f9e7d87fbb02d1f557b497db1a2c0fe40ec3f23ea88332513c68f724cc8a8af6636c9f332a8e55c2d41fd81a23e92e9ffacd3ef14cda669e7dbe31ca08a5238c7fbfe7020933087bf2ce0a7489fd5a3becce5de09628234f60c833002aa8e9c9ec51f57c8e4ba095c1d054750d46d64041bb1f567a82d63bb5e88fb70bdddad0ed7572229e56b90e74dd88ca829f1ce8424bd24a0bbfe3dc3f77d244ee59f364b36a4b05fb511b5b0d7f876c65ab4233803543b0a68b9d2d6d45d292f91eb4700c2dbf431e40c77a4fcc3ac3fdf3a2bae3df35b6417b8f1eedfe84cc65a07c426780871d16ec5ed3201ea4eaa778b71f04cc1999587bb4645bbc43e365395e9188c85bd024f758304aee979f8e67d07636fea251423e920e2b7258580d1918fce772bf02ee66926fc5f9a3dd6a8c89e6ce7e4fc03d4784296df1a9152a1fc66050983a287e3520bf3e04d900d25316c8bd5ab489bf97a2f31f4061f895111caff9968ecb22d75cb9e5400ca1d0fb044acb4fb9cccaa4766cf6c63ae5a7a3f9af90d1b225067f671d85cdb4e2e21d2850f351d995d54520fdcbb8cb30bfa82190ab2071eb8bf350f984408b206597371736110114d12d79da4027f9a58c8fede63cf16fa552d2a956ae2a49c83b0afca3056f87f1e27bdeb9d14a7e5cf30550017a3233c4f386769021a853b971746aa28aa69ca980bb02979779c5bd29259c84911e2b252"
"61b92be669e8a731dd74edce66b6f3ab5944695efd57c0004ff637eabfbc02ae346528fedbf2ae80d420580adc4d571a37fa1397fc2b85ec458d5262c15620c88f2dca0eb1bae4ec39d67fef56ecbdf89703919e5a6767d0f77bf6f0f60ba21003d033c9dc3057df18d855a5801110fa9a29a42ce10a44a39ed883df249ccddef8aaf832387e70048d9ad6014cc17f9a2bf7146696ee4eed388d06a45f7bd7696e57500ecfada9e9eb17926b16bbd90146e406e281141f0a918c320cacc9d1f045ac1bba87ce8d1d45cb6303988d5228da6ad33df6d2a5bd7f265b8f610078e9db5fa3db0e08286e500063f0fd6860a11d9985226ad382a95bc3c3941d43378ea1bf28fc85749f616092d77e7c292e311337168b52eba08ffc0f76582710a1a7d33c55162b3c7fbf227a324e1f4579e035ae0fa17fafb1ea964aa977490b5a3fc16c75e1fc50a6d17e193345b71369df804c61a71bf60be4281c3d1f945c690368c23caab006f9dfc913dbe6119d6fe8349cdd424db7074726e8bdd0ae99e2bfb9b800ddb965c06e0587cd10108c9b431cad4fd10d3654a22ceac73553a6b2b2218ed6526c362df46cfa776e2caea0de61b9d5c0c74e03e299ceb2221ed0f30ffc5876354d5607c3eafc77f78e4fce5e0c7f6ba7d417ac5f0511e2635b41b28dfb4f2fbb73d351a69fff920b76f5687386114b3d5ab9cad056c88840a023b7e2df73f007852763570d38a966c8258365b014a12a3497f506dbe55c073244333547223785438372884ecd8b66aa0a794ab5fb"
"94b0a519bb3cbf01b43463c0c7fc6ebc67754ca25686002e13edad54c817b0aef64698637d18a4a8bba382add892f4918b720aa99b09ed2a6e02b7140f89e3e00680f37343d3e47412d04ef78005b8b9a23b92d145a8da9c5efafce374955727367a7f1a179b990868550cf960c6df6baf2cddda5fe3e689de8dfcf1474db419ecf88cbce9de7a58e9d8a15991fdf5361846273d195a2892fbc95ad079ca8153910984c4694edb4c790f430043c4019fbd96fe49d8afa5e7d1f6674e4a125bfbdc916b0d3819566898599443ebf2a87b1fdaf41378227d396d2d320dc5b860705bc87f45eba2b6473234fe054267698dba0913ab1234b46697c54e2b19526d1ad4b7e3eab40a413f86170fe9f2a71eae2fb959a021b0b43516f1c8a3e674f37ee235ade79ca296364b0cad5ebe8449e09b63a34e8711587f7f2fe6e181a787b1d3a8f30012ce9549abb834fb80c673c575a25d3c33bb6d846ac231f411dd6422c59215e0a267424c0c57e6c9bd5486e8b6327e9dd16b7065eb74ef91ec9204360b03d08654a4e418346ec2d4d21edd5608a76903494791546d430eac38178d158d61951de3c61fbe5d56c22cbda4a3d40297f7abd83913e8b483d9a80cf000810d90a921f453bcf9e35732d2579c1aaef4a6980c666e3b273a9f91d9918f850bd6e4475d8aa5cb616cec58d6ab6d70dbe2b0f7ad85618b6e60dd4ff5d0faf19dfdf27a9ee48cd7b2d6613e76f04ab6ef5f0af12966a90875816c27c4297a2bf622ddf66fbe7c211670d0c46c7295b93bd2f1"
"22568df3dc46e9294c7258a0b7e81b2d45979680edbb7ab323e4857d84306ccc16ca79c711144eab7b37e3437245d7b78ced1cfebfc45892791b9ac6cc1211f83e328ce3f57af3d89b5be89dd2efeac9d738330bd0d8d4a059bfac06d1ad73bf6d427541e559c3d16eb5adc4380c1b25c1b8a9097ce7eeeed1c5d6884dd1a32ee2bfaab8371593a0eef65f80e705b9b56adfc0db4c272024a71947755032a5ebc1bb346ee8a99b01b408cc0b1658a319ffa5ab2eb87e9aa8b3dd9d9d92ce3bc04e4ebcc011a280143927676360f249ccdaf7949bb23770a06ff5861661d36d761508f7e9ba149310d1347c3165e07997853d415abdacfae9579d1dc0b5990a05ae9e6dce8931ac2db9414546dc64f8161a64cf30b9ce8c50ef2a99775f03dfc2c611e780a5cbcc27cab920a87d940acd8b3fd42897ab6f51b29214275bd564c50eb7aab3ad19a2c903c84d2ed5a23c49c81d87cf3244505424332c917d7b671d4a90765b8953c26bb7ed5dfe3e93632610ab44296afee2b5c631fe643a0a78eb9af94d700250f5a82bc57d24825423f1ecfd8cc2bb0daa229670d0d9a4fb342ee8c9b7b16d86d29abc2a57633303b918ac78ea8d2672dfdd4a06ea0bbd756fbadfb0c09e2426a65e90ca829ea00ad66ca8c9e79b9aa5ddd02d435cb23014b1033da00381ddf2dcf408660d1eebd1f6c7bf5ae9fc3fe47e75ff7ca482716534a9f3365f5cdb48f3d59fb19d11bb8782ef96e394296594812e8a7da23a953f6117ce577e55f3d6cb1d3a4007dc7d252c7123a8"
"37be12884e54ad10757af405beffb5cff189133bb7df5fc009544b2d62ec44fdc0c1c8240d4413af5b36e031510b1f1537a690ba7049cce9df4bf4dd63f6987c513992fca78a1cb7e8d670fb43a52ea2ca2f49724e35397041e5c75a365b510f40fa9bd076377274d6a95af801981d71972da0a08b536b024f439c43d13902878798153ed825ddd7dee8937181823076f036caecec170edf1b5fbdd84e530bc50a7acc257bb9679d72de3f115602d18d2d12e6ecf4d3242ccbe9a71a1483e7fe40d2447ba028a76aa92c13516ebde90dc4d204095a554cbfad79d6efe4ec540c7b51593413465b929742b729ca688f67ee9d9fe76431fa81217fb135d0dd6ebc91904efcb0cb6dee22867e5ddd7453f530d04935f41575de9ca457da55b67791d2e8b83890b5be543366b92ba6579a6f19f8e82a0bd87e379967766e5b0a58305b984778c562ea03a8b8392e3160ea4532b6ce5de74bc8fa0e8ebe88fbd62a73d7106a309f5a5f5d7617664b015e166fcd87906caa80ab4eb3e62f73e527b5d951a0ed0340fe17bb7b2692e4a31d14798879788fed12413bac50e490ab93ed66311599a6c1362fc60da5319ad907c7ef7852985ce86246276a138379d2004772d4d9a989b83b3e780bdda9825ad06a4b3dcc9a9d4d8025cbdee7cb2e02ea1f77bc90bf4ae56903859025b7283ba6410aa91933466623b996e9ad07e3095e376b11a27ca451c246d5561501e69c6747013ecda44f8d1fa50a75572453c9ddecc07b1aaeebc04cc7e976915f5e68d1236ae2ff"
"dea4b9fc4f8e91b03982801e2ba604b46ad80f966838ae09d2734c6482dd16d7738cadc1276593a336e2ce8cf7ce48d1535c7865f7b90445ff3ab9e56f58e254115bc07710de50d7953238d7ca419013d104d90fe79794995c28f219c963d716bf8942e0cc5cb432aafce4afb42f74596b847fde5d87fba9adce5c17fe590fe58e60379393e521ee194fe063211d72c29d58f7dde89addb6b0e20515ca7aa270df2ef2d77f92219781502c49292c6c4a985242b9447521cdef5a52b53b5eefcc43e8036ebe90b51a3565cbb180ea1b3e3d20f63b8f420c2a7f01c475428d5f63c66f122654af4edcbafebe34970c152767cf623eb4f1ee33931a79622cafc70cdd2bc7ccd55ecc1e0aafde3f66f5414315048d3c5c51638c35fa920cfcf7a18ada48a589c12e4da2c801cb8bf3b182463707a17891cf296ae8aae6a8a88ee3d602cc1bb7647861f65ec1a278433ae08d8c8e63727633425fda0b86d78378ac80b1bc1a48abf270dc2b5ea71691eeeb979950cbe0ddfdc451dcf8e3dc657060f4c3f96512b21bcb228a966381efa94bbf5ff4bbf38a803b6aafc719a545e4d0582a62e81e6468aa04eaf131f8d2f545c060651e115032f5b3579fdfb95a2328f5c9a0308874630e840ae1dcec1b9543c36267a9651c94c91cea42a93a91ba3a054ded4a8343864b449e46abec49474e218c8c541b00eb0f8997e710025631ac28be3f08126446dee0cf61bc69b85e4fc021f203c796cbd2ca16ebc8fa15f55510a08ed334155233c6459d2d428df31a3f376c"
"d81a530700b3ef08631dc5b50f787d4efe2bf219bd17f0431803d9d946255716e8543bf77fc44a48abc70a97feae8398c2059938d39fb4ac5f7214d92bb89fb9c45b6d117fd51f6207935beb1a89963fb9d1aa020669bf809c21154c20e720aa1178ed2bc13fd548e0d7d01eb1d028aa48318a02dc7aa412e2ae01ff59a86dae40771ad3f48f0fa54b6e679854be00deb9938e37ab3a4c9a96f3b7849ac75b82619cbc806c42f4bc4feb1141f6a8391bf9335f643ce5cd2791590b28b19d03cca7b5cf702f10ffa0317327e828deb4791f71500f243be77a451e5759c6c711b38f8f62757c54d7fc6dc586a90df7777d8cf1c72f9c0947af005d770f4a74b6c9413738c3b5ab32306ff5b41a6446c2de3f59a27b79d877d3f05fe22d11afd69e49e59f35b3725a0ad126642f388602b7816abe397a9c9233cf7d1e12a00362306d2d9b81fddb279544f35e23a8c198930f75986f26e6f292ae8debe5da0a7a5b8add2be71efc78179eff7fa2a2dad35863b69e85e8172073f434f48fb03f7bd1bc78fc2badbda261a68f7bfa171c898897b3b0d4852920674b8d9ffdb37ce66c1b6aaf9b375253a0d74eba4d359737f7fddb42471969d81605e41f615399c5fd6cce1808e9b511ac54f75f774e84b00970474f5136447af04b4866ab6c54aabf7a247c6caf3ee891fecb14073f3cfdc7368ac00f6b1c9b23e301e49257840f949a57c28a95c5c490bca91bf979d40403f7b9458bd255df757e6eea0bf41d5175548aa46243d98f2f0f6c754d6e7e58fbea97"
"7d7e0af8b7d0a6bce07d0c483293868a914a50aaedfb9b239b4c3c472381535b287a4146fd52e7bf882c9c3eff7bb2fae15d5b96bb1222d81d26dba563ac550e716b6c08b062cad6702a33a9db4274fa2e81af815e8325101d5a9ce9b345e29619da9e45dcbcd7b0935d7dde07644edc6b049eee9371511bb2cac50ec1170c7aad835c54fa52c8e0a0e8446356488e09c2f07b17413a7ddb872d05016aba129cc36de609831863747310f0fa443480a47524dfc5e1f34eef3ba2fefa29e596e7fff86a924462781930fab55e71fc2f06271e62878e51e0db08ee5dea31f1d2afe9a4f548ad6a4f4763c9d0eecbcdc32323aba1c9c12554a5cfedb5310b4a03caf426a80d725fabd557493c46f2a174aac851d3d39529d5ad919fdb7fb0dc1e5b0ffdf706a9f5af36fcd2bdde28d68c5af4a1da4e67cd44f97b555b62b39cee1274b7c3dd3971ace3da6101c87f9b8f28c5e13d4066a3e63543825dd8bddc3e90b6dc75bac78931da98929a337817f68deec6065f6f7883d5bb10cab909c9945f71a672eb2cda9fadf4a8d9da906e2a5d1f589193b4e791772663f1bbe751498bda065f90244391169d80490208083de39bec984af73dc99b10d85958f372004a03962c45c531b347851dc5e26bf7bcdd68c9b129524d6734282bdd431f991170d6a5c67138a5405d8005b355ec7ce95496a8e98782f6d978c42c30a17db9c12671d82f2d3e257f66980f20bb6380303f1e89b10035ae7bdb3e55d31f2d1574784aed5c95aa09aaa9614989d957a65d893dbd"
"abbfaaf30cae0cad575e39f5311aa00a6979fa52ec12dfb2f731a3ce5f8b6097a612c2ce98f5898eb2d1780d0cf9ad30ce5395ae871ba7ca6a0884a13c09732cefc5aed9d7a28c09041cdd62e75d7396432545f0c16496b7f5f516fb2cc603c0ec10a51ee952b7cd0593ec00dddf67e27dfe3f0cdc5bf737170243a8ed3c1f59733fb47bde4b6578d7ef11f95790d4c678d95ab2cbdb1673d2d516c189af00f996371077276e672f1223926fdcd6627ff86816906edad3aa97e3a9e7346562add05ec1a94c2dbb7f3b28ef537715a1d69761bfb8c2092e608311af2f79a4f8188665a48539944374437bcff6e59bdff4e4b9e4dce11307d892915071157698460b9e9fd68ee0d1acd21434810fc8ae702fb8dc794ad5364c79fdd74c8a70f390556930fc2a23064f36411c626179d1d745d4875f5c2b37292cb8ba37bb78d419f05e9a5d2245a38da20b6b14eba2d5ca3d58d23bb5ade1322cf337eb75a97ce98c167b6305907c3fe18038bee1e2450c3095480f99c9f12d2b543b33866e5546a39d539c6e2d639356bdbcbdb3b4e0935ac76e0fdaf54cfdf241d2c5ce135324885f8cd69e6562f48979352bbab357c6861c66b4ff7d9dd5d32a8ab8b6e759a2f5ddcee847fa439a5f9e3989039aa60751019eca6c7dfcc2464ca4a1ae12f079d200961797cb0e52cb046d1f0cb1d97c4699e07f019b48edd6f4a71b99ba26c2e5e72745cd9bb9a7e89d8eaba646461bb76818fcc447de2820196e32cdcf4a57c527c52f64d316b513f6a611c929890be5b0"
"3b3d3352cef23bf86d0e058b1cd9c4a10a9a01060aa9c9cc4bf42c7c6cbb677724db3f0c3736461c1828e67c9916e953057024371bb4ad8995672f760c47574bde9df9e73af90773cd46c9df8cb655f8c37eed8cbda40da06304471e32bc828a7dd9457fbe4d63a15633009c1a9f003f3db7f5b2b5e3b22c60f747d5627bce3eb4398a543cf24b18cf0a56728adcc253d7f5343245c1426b5bcd9daff94394499cb6d7ac2b4e63ec424c66f5dbceaf877fc13f47e744aca7d8b5d89c8d5621f4e13488b141062ee04c2312528a0a987a5d32ebc6ffae45657f4b2d1420890970e363a124b75374594dea0560320b36133e31d6a978f90ef079b81484503c7fc3edbceadfc9fcea06f271a60ea6c5d434b694ace1b506eaf013aca2c6103acfe6c565a5a24cdf638f8ee282ac812e32cc2662a8e2d4a31239952836c4896870d973bb65b280f0370f4c3a54c7f4723b2bef522ca4c233d7646da3fdb9743e273afa1e3bfcb947eea9f323ca908bb4961b214aa906cca1d2d56eff25d60952cc5897ee6390f9af4efd5d48b2aee8734cf6b8042f2de75b107f8d135d9a63148e88e43df815fe7871a354741f8863af4e114ed0369515bca104f8d3b24a2d740b8617de3e96a23*0", "vilefault"},
{"$dmg$1*20*f615ec6c463799eccc6a2dfbedf12c6bdc422a2a*56*a595f4a81a490e7aa6378034661da57a424f922c971d3db3f856f8d54b0784bcc5d7182905c4237153c5d250b8aee1d26410b1dca7b1cb73*48*74a060efbaf2c79d5523219d8162c425befbb2094fb46e7ffaedc7cd4f192e6f0c47d8aa91e0a3201346725d3ddadfff", "vilefault"},
{"$dmg$1*20*9c82b419bdac1b3e6b71f8a6b99a7501f34b6950*40*5da479e292e0acf67a9fa3e24d0a767cae2f645ff63836665068637188f4b80295de79aabdbc2536*48*9b136165ee73418631ccf28d5e77073788ae921df596649a7a7789585db0f13f446d5927967e2ede20ce8a4f5389185d", "vilefault"},
{"$dmg$2*20*839730be2331c69df4f729ffe8a10c26653bea94*32*1f24e25712c2d70d000000000000000000000000000000000000000000000000*48*3231e20aa642889a7e087cb87c84ba1cd52864007cfea677796a6f52e16b2609696dde9230aeb5603aeb1f70f6701be6*14*8192*75884a049d2b7a40c14002ab6e511bf3c73ca79a2bb8285a3d2ac1d5b9b0cbf92d4a483fb762bae8485dc3fc9cd7a54141da2b74a86ea833d253d56f52eecb9dd4d40b9f846690378cb8a5db74fbc6d756ef9fcdbb5d21805ed43a7fb45d6caf6b3d2564f4a7760030aad69ed9e56789e8b2699bebfaac3cd73130fae1d8ef7f003e765e86eb84e990f3c24780022fdff3ba283ece4fa8d31716e5cb1ea22e408431eeb2cda1460217efda86461e940cb10ae602a84ddd22be53064e66c0973a04405ff17afa020b24f1bb4ce42750b28cf4e98c4f542576e712f3c2fe0a0539a411290f65ca763a94d865fc24b1beeefbb6b055db453da38e62bc383e74b188b86c54b62f589334de8ce3ab2e4643f76eb4db95bfc088bea8c4e88cfccd19b89b818fb698982f73df634c8a8148e4c8d3ec2dab02aabcf48ec0a78686fe0b4f5e589a067d6c54f0732e559cf9db5b4ae1f0468f5681226d3b03002cb6ec528b96470f1d1aee5d3b51b4c5f45a2702830ea35056e02279e76fdd30b3ac174cd91b65fd6a26a192f6e632b0fae660d0861059a62bc512f610f4974c22993bbafa364fd2e8eb53d07244d165f990c876320d99070fbfa6fe7e0ca42c0ef2f17205ca"
"7196376d4026a8a93fa83a99cd3b6cde354ed3122dfc07ffef91c24f2036b0d83467e120b85a92fa04120cc8f7af3196adb6420f519c610983d163964b0cbd048adfb89266d9ccf9845cd17ed04accff9d106b7bfffefb365e97357fdb9ab2d0956411c0c73bdf235a9ea4b50962c8f258583899ff2c0bad6602e8a3c14f3c870fa14686d15aa17f5cfd1ddeecc7b061cb5c00db7d198d083a690ecee97a1b4b0251349beab744c4bcb53a4c1702d1094f6591ee5ae15a29271ee3d3d22f0f833219c3676236c9e9620a206ab6ab08fe5fc663f4f2ccfdae6e34adc68e59fcba5363f44cbc5d8345f184ccb38d52bc2bbe6ad996c3d4316ce644698bba6044209d108c698c3d18f4b64161651224cb015052d2e9bee0079b779d77b6623e9669c4ff99988bc612c4099f6b8bc9719444cecbc5f87bf9ca6dc30f3b346c3cf20cc342cd4d156ed67c8be0f1801c3e672bfdf2fb9e6c6f1ef3570d059405a8a0c5bcfcd70f7bfc1d2417e3ca205be70a5ffc9b4d1d123ff64cf72b20df25e9861e1da57fd1311451e542c25100c19d1d70bba2c26752e4cf1c59a6373fceceebf2b4c392a45e2cc7151f4cc1c7292720b5f0716cf7ea752a8a44cfcb7f638c5387a410efbfae90598f2d99cc79baa298e30076d5ac8a2094dc14d81953c09fca8b41f88cbca2274158b93fe5a151b93bec1fdabe1a6c67807d5f9d46b2a19ba85f9540cfb54656fe473216ee1922046c5b6cd08b325e0c25a420765a61e5f7a266c9e0ea1148f0e62ec65736d4cacef77940a0eb"
"24e93b7b656e3b591f5827e78b577b628da26c1e5bd7544dd439d15ca21a3fbe96d3833ab1bddbb03beb8f0fe39517958b7bf43afdbc68b5061b41145e151d228bb5e5220b31a86878be40060839855db438368e40dd6b8d534c5c39009455c0a783455b41b572f2864eed60e5dad80979b97efd6dd08549c154b76f748101396847efd56a97b82cf62a25e26ecaebfa35d545cdf886ecc22460cc0e2983b9da14ac41dd1e1dead58a2c29a85f6bc900268d755d1158939470c4793359b50da19addd3d8f722c0a889ebd8dc69bd955b524bbe452cc98834613ea48d7a73a9b93820c0ba718cf664d82a1745451a204a2845d4e2a846f0f18923ad0315896b1c1ac1942fbdcba119ceed9e02b0e707b28feaba44bac94888ba1a31670cdce6348d58d2072eb13ee805d569815fb28749c392d11eb06d8b1746ba8eef3313072fdb4685f1401717933fd18edbc99e3d89d08a4c7798bc1d724d6bca02a31642ca0ac6223884580c0be8f6508a6650b783a9ef24de3713f65fadcb2da6d68c4bbbdc216ff91ea7bd24bd7365b91087c14edf70dbd4eceb2676797ead7fbedae77a0add9d22a515e2a79d075958d8fb87aa62700c62df007abaa3a5e002403205fe04edaa4aac3da6d08ad9ba909974e9091148208db90f330b2c2c702521d4b1b32acc4fe6b7ffd9f96fdca05b6c404afcc789fb9ad8c52063fc0f9b9cb4116ee11f07aa17dff57b889a4f4abaedc51a07481c1e954d78ead32c6e808d3eafe7cfa9d2d4ab4886abcd2f64ba2df2d8d507cabfa8"
"d01f785409d71896461adaeb4e34d18f9b2fa38779f0932c27ba2f3f75ece12f6eaf7a0d728dc02e97cd44ff175b592b8234c3e3b5491726c58dcf0a1b77698cd38d861fcd549aa793f8d2b58d6afd1d9b7bb96c8936c960eaa7072c00e69f68f948ee24494b8152bd8e5d6923c8eb26023dc660d202e41663888a8e8550092b5e1610452c79069b3cab41a2e7459dc0d361ded09c9f1589999623f6deacf276eb72996a355e4f7dc19a5217e9dcb2d6a3e4679bed9f980a5dc8f24a1c5f4eef00d706566e12ac8deeee964ab9501be5e57e326a6fcb794e4f4fe14922704206a343724913ca2e1d26e3d83cf994cb7aaaf9a916ea6eaa06987a9822c5a8e556b16ad72d5f5640b3490d6b0f290f9f2db7c3ead435e534406dee40366efb98f0b53930a83ff9bad177b84343d204a1083801f1d68b3aff78ec4246f670f924969e4608b419ea9f5aafec40d902492f62844d9a83d65f38af2531b875b964abc781b3537c708fe65f70a11552990447bf6db287412367ca918a39d9e2b2e228451807b01174afc33f5f67d45f9c765015da6abd318c980fc8bcba60ccd5193e7a8caa54193aa83bff7b77725be99780da88b3209a3cec620c17f979fb16e640473b0d98a2f492702ab99f2f0f83bbdcabc2a6dc4986476f420f112ffbc7bddac8cffe59e82ff558151b9160e2f99bf37a05654253321591ef31d01b32b8d69297b3bd57f127e9f574fd472b6d29b6e9a0e1fd43252bc1f1b2c8c959f3f4d80177b4fd6a77dde8fcbaf1eabcd5e7f6d38630f35d"
"efc161ba7432cc9af6bc73baabcb343c469ab18e4cf88eee21e49311b4f20077bd6e30705338f047a9c7bbdbe4dfa6d7be3a827c92823a3c8f36909f9e4df4dd91426b75ac6b5d953357929b0bcd91ebd24e651a855755edca82c4664d3c89fca6001ba88688e5ec8d5e5c3fb145b963b29424192530601d74e3b815be85ca44640ca89c57ec4ac7084639b82e23f065ac561779c040cbfe63310ec846db02873203feccc3f88a28fa78d8d567905abc9f8f561b4a29ec5c380849ada42100c15efd3d73fc203e63a315cc27b82f62c4ca0df9ea213dbf7eb39552fcc38edfba0ce7e25dd097bfad5224369f1d2a175ab88ee5a3371daece3342e99c60cde76a1ff5dc7e5ebaa7e0fb59d4d088cfbe7704126b2697d62d7b82289a35ea778ea4ca347410513513084f1fa971686724761f711a916ae1e92402ff3d52f948fdbd9c1d961c6ad6923c8ae9cf3a4eae7a9369daa5cbdadfc786e873b90ed1e8f5933ebd011081ae7ea236c11f0c53e00c1c0f9206f91e6954123b5caa08c7615a787c1661dc17f297c8ed2ff6c90dfdd9a262ab5e9a4489d6ed7ac032f72bcbbc2248e7f1675e2b2da0bf85caf89921fcd8e78403f11a28970f673ec7adbea798b3eff87fec642ef77c15b3f3d19dfeb74d1ef6a38ab938692207133aaeaf722aec4f6082a4cd742bd37fba0f1f83f01cd2fad6a169c4716940f7d74b8f29001f406de5897a5e5d813b995df132cc57a5d9bdecdad9024dff7dee8b89189d35085a70bba2e5e0a8c1c71cc593238f3acbd1337b2c"
"c5a8647ce6bbd669eb939279d3b964d661112752bd7fb877c4c6ccb5ef72ff5446410286fc69347841c5595a3408e0c73fed8984d0c0fdd2544a168ccfe41386702f6ab7b3675a78b57f9782f23e0471e6dceb176dc9eb871ddd92dc0b86b2a11293523189c75019200a45213f0cbd86823f65f28cbe6569a58512dd469431322b7ca5b9b8ca57e56a139dc4788ffbac10fb57441f2435584651fa572450a4719c8c9b4a322f3aaedd3693a55820c725b63096d3f211d830d39aa89be83d59b13145dea9231266ef6b1eb1fdef31203922308cff81b166426d662989a350ec712dba14ced58df7dda0d0fad05ad8d9c6b247307d481f79e6a3cffdb2ab9b21a8208d6d7faa72b6f22a505d2b950884474862f6f67effc81c6292f3550c4e8852c39c52d952648b256e961d478c0c6979300c5188c490ce5c1e34ff6dcfca63c0f0571ea616651ef6f9781f2d355dbca208e56948ab9e26c5d2d3f8509952bba3e93241837b11a89caef6c956c9354ac10425a6d8d4e82bd5d7411d18655393d7c542a7c914a5ea6aba717a226e0f51200cc949f38c703f4f6ce452cc1d7d6ee8acf26d34f74981f6850b11610c11d1c5e6689c1b6fcd6b6e997ea145851c6655560c33dcf5ed7315578263c39fe6a838c5de867f1b3cd482c0206f56ebea0617ae25b3ca8d7e13849bb2b58ea4e21409762d549636bb7cf5ec32d3216d827d94cba1f36e7632e3a43b3203fc596cdbf879d1aaee90804fa0cbf46d08ff4c40aff8fb2b46f7ba8ce21d17c2d3d025b67702054e"
"9d76716fe7b5c9d2f43036d86e6a17924d2f160f91110ed1f3364a1177aa6193baf59878ec84f450914faad409618bf25cae17ba5545abd33833ebf408990fa4236d322089aa42eebea965e59456250fa14bdb61a32be8d70372891a83e7bf298168c5431e0b326229c36c667217bedbf64e3a07019534a087e84cd1a9cf35a889d9e65a7be63e8d638373774148e127b328734963437e7f00253d2fcce7bc0d798c09326ccd4f379f8a29f2d308ab2fece6fcadd653b1a3ba53a078e51a1a87e8dc03c5c118444d82d9166c0c4c1bfbe8ee09be6f8cd497a20132d4b6e1edd13683b363dc6587de2f11cdd51674ebdaafc41654d639b6cdbcc040f5889efb1f64e1b873442493ebffd8f867f0e1ba2cc629bc5239ded578336a9e88ee8b2d1b71f6d9303cbfb8a35e4015d2f9ec25eb4618c2ac17166e8964b68a66e60cb7b464e36a2251243a218ee542dac96062ec7db751273435dca23bf3e8aaea895ef1d6f6bdc98fcb6a9e0658dbe734450682cd1a3fe16161a9fbd035270fc86684971e20f1f1869546e1b77a481774c9449ac6499f376bc3c0f0efa589abe3bf676fb385ea50618c681eff6e5359678f078292da285c4b5e66d5ddb43499abc3558490aca6481299c351c6b053739d0065c187f59767e7de24f1b7bcd2d80d0ab2e7c789a9f5172a8411a88d2c69d8f9d2744ca7e42ba8478648df29919c23c0f4cf14e2428c792f2d8abae1073b97d86c2d5cf2e5beebc7fdfc449ec3804a81199d6c4f24d9b040bd1feeaf141b7eea626c1fa812"
"e499b74e86dded2641ce3e11a04a35c8b8831a4de563c3614b4048eaa656d8dea460d2c46f6d748be434718e9f54934804756fad07d2a8ace694bccbd7bf2e33c09199a22a98726d2e1a690b2a9c33e39c8746d8125d93f675c571247b0a060114eff4c32231898a05e3ced4721edaaee9ebab9b46692c65f086d9fcd34b86a499685010ae0f4423625263d0a2a62672624662a6613bd4235b7402573af1b0571c364f7c14e277b84e4a102b1055a1456b912431f9ce9e875056f8b48345ab09bf06b3de6126fae32e2bd61d2fdea29a2f3cb46d963fa40694c02657352b9b9918bc50fd7e26584e51ab5e4bbcdcbc18b9bc17d3efc5935ae5077a269fb8e912dfc91a2c287686590c3e2671f6d29365c044fac2c077fb5ff280b0a4d69eee3b9538b4c8a029a3360902ee8291ca9f1088074f307392b70a7a43ceaa07c47d175b286c052e2412237da3f6acb1eb6b1ec386dbcdf5b49d2391615788f401ec234b58b112d296b389ede47243c01a1a6d18ca5dd3f2646d483b97e41370faa1c023118a1d2006694debebe35046f6e5852952bb520c9991cf9dfdcf89e51fe29d3cdad6f1091fc7c450782f06b09cb8aed1e1f95221af7ad369e49ed672fbbf2d255549d0fc0398dc6b4d37d038a8dc9e8d9b4d6faacf3c5fd10663107cec0e171ea6e1c26eb8a1534646e0813ab0fb449d15b4865eb2e9914d404d06c1e284f66e39d09e99eaf7c2f36997ac6ecb9197f8ea7fbdf7da38e427dd5179ef265f1471a096fd24d8ea2a2ec3b820c54356cd912f06"
"9accfd370ca945e60c72b5d479b15d52a5c3c4423c73f4ec06d9201ddbfdaac2e304b1408674d40c203ed48fbf4b126904900349228b28fe262539c9a12270632f28241198381c6e7174d275227c99178ef4942655ec95acbc19a3b96fd1e07b5e0e91488c979e7e25be5ea733bc3171b2874801157c83a6de754ecd05cd78d6d2846e7ce19f641bdb53075dca078ad0ddfa871c16e47da96d007b5e2b2854d151dccfad21875fcd12df56dee7f4aed6a54fa248ba2721ab2f58c1157c85a3df8486f99295f2c9b8e8cd7a65145b69ca93d0ac4fe328e31c07bc1d0af2db886266def575d74be200ec9a4ccb0213743eace8d7d39f810e3877876082238d72c375a5cbdc4d7de36c2ad90904a173df80195cff86f19a0904d18a1f8a92cc4779e5997dacba58770c5091dab5b832dfaab2d0fd102b99e3b8a799ac6e7357b294a31db5f9bc3d04036a4a6e18dd47dc88b0f07e1c4271e5106f329731ce4dea9f56f6d63beddad788d7eeb955589a13990cbe3454b07f63477642613bd77f3bc5d024dbc5c55a0c7426ac7cfe63dd2da9f0d5a7e816dfe5856b646b648c302c16b50296882c62334c9b8e56ba6dab63a9c787fa153d04e5e64503c6bbb9bfc8957d2fa607ecdd3714123dd52b6f9c1a3a73f649dfe67fd7195857955cb8c5470a9f363116cbb580b793033280dfb63ae47b384e6aed677251b63a7a27447f37e9817f10f27c4a0560ef34c0255617cfb90769aea2e5971077cc89022f8a44493d5157ab2962946c7fe600a24f002cfc6108d345"
"469a65f2f29b55e4da3f4c767324f173a11567ccc401628f2934989b29875ededce223de3134b7e99384f94436bed28329daff8da5690984b491d43f14d86d5a5e783545442f913dfa39f25f6360d2143fbe4c7e234a40f65b2c48ff5835c3fab67a92d0adbac9e63993db052a832b1c7b6045a495b82ed0d7f1068ec96fe1519493f7376a9f9f331f6ae89420fd1b523278df3e78c7b957f599767057113d5a1895801f1fff1b7021fde8360c4fc1ec8165132244b680645df7a1c0673728ca6323379739905856537091dba18f762b7be6f5f7e95212c402b005d73dce6a7775e90093f927edcf0d9ca24d04809f953ece372414d5f987ec2ae030dbb547db5ec17bef47dcb097fcd2fdd873eb93a99e2209425d4fbb589530fe41bdb5daf8ad8f83e48557a01d2ff6b658368e39bc8324cc2756160cdf56b8d7fe231aa03e82bf0b3f55eeaba71133a6bbf72342727a52ff7d158992895c61c0bab4cfe42ba5e4d5f239ef5efb6433dff84a02e2a5f12bfc35c1062e4103a3f8fdd1c5be28bc83725023c8a72d2cf5103a7c97a23b2d9903a1870726ad2bbaef7b7a6dac3e36c1b92769cb3f43eea1faf95c53db0cda2a8bea38efc1dd11695bb5de4baf583b175a32d49f98c37510e9e56f3d9e10bb4aff163abc91a36f24fb38d33d87fb4299d5ceb5144c69cb741b03d35436002d7740c38753e284a808a77cc1d4ff9e63b9ece720e778497c25b46ccf757449cb3b3fa8e5bb6d5a9f6eab58c97e9469cc6192b7b31362453faac839327067f41f25ff"
"34c2cd40e9fee3a0b8133f266407587ac40db20e7d7d397e90558e54250111f540a44a70d427497b5a06c8ef87f6bba0082e00d42adc7eb38e890dcf5cd426c1bc2b4c781b07670382aa0d13e227e05c1987d3cd0241b5ad78387e19dfe4804189dd8a10cab05c79409b9414a6a384cfaadbefcbe8e3521fcbcaf52d92dcf1611ba3a824b576051aa24f42cadd7b7e9841375646740f2a6271d81d2d5f4819ae6a5d3f1feb6f7923f4252872c3a2709a8b8556b3977af8c4423bdbcf66ade1b3c4303539e06957e8930aea8ff70d6a202407aa44c6c8dab0232a33ff3f3ee9f61ed664bfadde8d294022da21b10e0aee583379d8dcdc078639cf3a1ee18d6ee1740bf1b917ff56070bf807b90d5a19f37a5c31214c6a19532f364d463595262ca057f5865f0d55636ce080acfd4e303f03372af014a3c32d2efec8f7f6cd6c825e5edf309ed16008e50aafa2584804c1897f6433e350cd91e155ac786dd9c3deb22a39d69e85331086842f32ba7cb6b4d4f13e08d90acaff24315020f7efb2b74214b14e840d739378afadcb06d45e7bcc17f2a03ed54d0da71d865508900334386ab96e11b88d2811c84539e4e2a93aa27d66620500789bb4d595a8b2e5972b1805d88af2b722e1e9b8aef10ca3dcf5ddbf3d20a6f101bf8f8a8cad825946dbf0c64193689f461bc0c62d138f902575ed601e26184a10ed9df17ad4be7c9672147c0158f132452ea502948a749b474cd0a63ae5cf942609e4864985b4060239d0cee6c78ce4dfdf5750b51ffbd5ee920967f5"
"dcc52df6771e286eb83dac1c576f1a073687411cef3701ce6de66ed17bfe0fa5f03c63f96fb40ad70b478aae1e16efe22cb9e8c2aa57d5498803d35fde7f920b32ec686e6091a9ba6eb91fdd17b3302b760d084bda32244f704e14af619a5c9e72bd14c4e69f51177a26174c16d2e3eac934f184d460df5640fd84c3d3dbbc6785c249a501203374c0d58852d52c4c64a6d70ead2af1bca1d61f6f4cd00c3892565e085d3e603a0586d176f478062b092b205807fe7438a065ae7dbcb14f69c92cae4000dbd6804bf4eabf112813ff0599a29b1fd8bcf9d0ba7d9b14e40e38826b48204d8c0a50fd804167c88056cfe77e7a75ac36b5bd049571639b3f02a7e973abfaff1327080630a4bbaf6a096005ca2ccd54f076f2c3311e6e7b48bafbc9de38d01c8a01ee41d25ff0f775a2db4e34566e377683bad9a133482ab87907769bd783bd170b616d48974ad332e3defe94a2e7d6eccfb4cc43cad93b53c476e7795a087fe58cc074b591315daceee3c02af54d9beac8162b70dd9863bcd7702b7c8c72022856f78b2d249cacaea6c1dbf1317ca9e35664c518bf4155501ae77ecc3f47be6e7151c4d5fe56b893c69f1f939cdfd2b68830d9ea47a89fa7b3d4f620e0909d5a97f2637e2eaf223f25fb5ce7949e3ceb87d93db628872fc469f58a749e8b4841798ef505ef2712a3ba713386dc56b83e504c3d24d2ae8200698f9b3eca8d7971f7b82dbd5df6deb34865e2e6336fcd2fc3ff00bf9c8d04992f012dc9473e347ac05aff1040f010b1683c10dcd0bb"
"49b7b5883ceb6c0bee4bd2ea6d275f884a37fc7151245274f208a457f4bcf180d793de68f09c7b03e7e430dd34e553362f91c4e721926eafd54d6c8464082d2d4a4c5b4b44495ddb06290f01913e68c7cd95963242df31741eae89eec41d0af689518ae335aae42c60041154356ce475ba0bc7f6c5ec798cd7c493aeac5e08d7ef554dc23832161a615a6b902e1d4f7bd076f3bf045360cdb73c3b2d7c158b74d2b718b95189225a0824a38836d1d4dbc5a2861e62f8a8c2723cbf1fe8951860f0cf7b4c6bc4c307cca509435e077f3947b8fcbb8ba1252b89d61b69b0328a2b1c31255c2c9df670bc244af42599cb5982878fa363627b321302255f2a20e04b70e8f4f63638af83a98ba40c55ecc46230798224de084d2cc203841d91c4f049c9b0a98535f3f905bb80b24679de883470c8225af80361031354483d879f98b78cdc5aeb07b371fea8355d146f9bbe16c9178f3d83ed63e2812048a386ef85d6c35ad696936a008a524f358ec8a2e40081c3c50b73fcdc6199f59e14b6ee213a8161f675d5938ce72a848ba9e7ed930198d9ae6c43dd86d94d88c5312be17b9dc590072e382607390e247869674ff446e8c37d89b7276aa61b5ebeb0ab18f500389a326341ee13283965dd4cce69b666d2c114372cb0e5b5d9921cfdb5e12aea0d95ec0a73c8d07b3b3e0dd8d159d323feb4bdaf6ea184bc2fbed75e7cc13bde26aa597ea7eaf0e37aa4be069c2c629af7debd8692befbf74d6c9939165e3238d8b2b573001ce957942b199e5c57935ecf5ae0"
"c3b161b96f1f637605bc29bf5230fc65524041d9970e9b4bd6e7469e0c0bfb62e672b30a7094b014c27a06e3982d83a951ea4207a4d7b38eb155259b847ecba4675c3f82c48343a07e2d5fe16d3189c8dc0f4bb1fe2ca4abce4638a4462f0dd79d69c240eeac8ee4bea297bc1bd5683ca97a352712bb4461fd507f9125f895fc7ca8fc76c7f78207224d0fd142669137ccbac0f023fe1700eef77abc804e9b9da27ad5c3a767202a0d0a36f8fe86e2a8ac5f30303c39fad8b65a206239b881910f9d904f96edae31e4befce7822a7399ad06355bc3c7198eb1a4b2c7c8b4c92a604dfa4905109c35edb62dd3c817cbf5261f5069bccbcf98da9ee5ea192151237b31131953509157f833bb1b482cd011c361d768347b2d0da11b1dc43b392d609f0c4806d7325e92f9d76ecd278fcfb9d91e9993addffa55d66acf9211b7cdcf28c73bd4e7cf83a869532c90f9880bb963cec69cf40e117b3fdf9c0c5c9d6570a2458aa9d14716ecb8b6642a4cb1fe0fbcf8298ad0db3c676b9836910658f03bd47ded56ed210cb1e2f1088c87f4e225faabf29e2d450468ff6614f282e15b4a6fbcc9463a16f802d3ba071fa5b009403478f1088ca8a8d9eded648be7394aa6bb3590c0725ec87fdcc53c4d2afea49ba11f9f2b3231c912bdd9431ad941a7d89f70d8e1669e90553b047b5f4a033437fe3b84c05105227efb5390e6e99b597fa1c35a1940f513ee8aaef9485d1ffdf7ce94fd34dfccfa8f178dc113c32082e0345f6d39294ef283b6f9a566a87b1122e74411"
"8e643cd6a2ecf14e47d68254d26942666fcf957586497c72c9e5814ab3371fe4b0f9a7fa1e5d9629d0dfe9e93fb388865a599076e7ba983365fb3bf574d335787416c099c545feeea69e3069d841b62e4db9833e6865e24cda78e2bc46ee83ad5d79bee507c44007200e64b5d1329930bd658e6f051cdefdf758e5b023650c2abda7a6827ca394c086057c617dfa8c161ea1f953446d8e0d5f6d5c76bedde8d596d1641a973e2b53bddb8f7bfcfbd0fbe4883f4d6d4e6f930e51d47ccc40148e6ed1b409705e9a777f1bf86af2621cb1f04ba160a5faad78a0949032e9dd7e34bbe6b2fa1c478a990d3b7c474a2f81af7f7246bdcc669df005adf397cef71869237c53126d1301ceab14011a529d4897cb00f7d93f35031facdcfda8110b9fb5d55a057ac9087a9cc8f1034e03f79a806db8a8e726e8afbfcb2c7c39d3315ecad3a2e542d94753b88717b7791c66c47a45f499885f6c096cb1093d9dd6082ba8eb2132e4a80e22ee309b7f74af55530e190d73315023fe4b52fca855a06fd111fbe1125910f4ace6dcf228447c007cf82fc50993de0202d28aed32ae795d2d75ba8c975b78c657af*0", "vilefault"},
{"$dmg$2*20*186673f316ce762e8f2b2595b3e8ea204aef584e*32*df036556654b76eb000000000000000000000000000000000000000000000000*48*71793cfc457320157f12b1351051f60e59fc80a728f82f0156cc8b3f20f75bfb4289c65e6c8c21589f3dc6187540551a*2*5953*3c25089e22f54dfa868b7460f43185a32b6988681952eca4a493ff4699e2340f8cccd06ba2df28334dd01b83f8bafa3754b7afce8f859ffaf64d33950a817d5ffa9671894f71d6ef35aefd00d237f7f8f413b8b8424db42e6fe7bf503d1d4222d77d5c3c2a16f26a1e15d7797cedd59fbeb45f70ff7731cf8be628895f13cc2937f82c92e0d5c6b6ee0214c668ad1ee4f41501dca668af0f83ef252bd6b6444f9028f12ce15134fcd8610426b5a6a75ac25fa938f93280143b5c991a683fb008a08e133a962dd4e3aa9ddb57e72955e3a840c3599b84d874d61cff4236fb487e2a344ee3311d30a531a20ec800ec591607edb97599b297ac67e173a4f7d98ce2d73b66c37659bc75becb65b799f0a1642a4282ad623ee574091821c971363128e307288b4377e1e90e831b800936f2b5eb05fd5d0e505d71e7e34311950812131c5b742ea238bcdfacaf35e23a4b5b9ee2a7c0da6aca0ff02595fd4229baaf700eab8ce7ea772e133bffd5665ea3ccde2edf61d11e64dbd1919454f977a31292416c86e3e11b762a3c6f0c27cf1a07ba3c4197f21c8959e0f04fae6a086be6e77b47495d0cbfcfce05e34ef361d45b1f8c5068f0174cbb2ec9a9f37eb6ae1fb0887"
"17630b97bf46c801ca598878e6a8a96b232266479925e8f170bf76afa4acbcc6c7daa51c2b9a1821e5b5df170a8b57aa371019c240626b2f2a9d60587c34383ea7c12b300fb478e2b62ca9bf54b00f04f4970a68d6689c4087713e9b6be1e7c92ef16a7cd527d1ef33140d8d3994c07d8ae237e047bf478f164aee1c6300545bf986e570a403ef626c5fd14044611621bc5d5f37e417175a22288c2fb45b0e11e946f755fccdd774e5ace72bd2ba44be8f673235e9b49c0fd4d6a912493fa797bd97462de0402f77da7eee2ea6c0d02fa880ba57390eb1f73927d4616b95067d18103ad4b10af7a40b35e620211acf4c9f47fd12080b2df1d350d17afb649ea5e8a038157561b107e7d1d00284a59541c0b759bb424d2795ff1d3bfd7749461a9f67502df649d2d69e72036ab4f8869c7bb35fc999a9179612524e2f9bbb00e7dd5ef8fbdbfc486447ad5ea93b7220608aff49eebb98a1de88c68ce2b9846a63ac6b8878fd645bfc0c0fea6bb746b15301f58d2b9d2ace73828a623885fb495761be85780668b436fcaa6367776dee9e3af641ed5755f1cca7a931c97162f6879c7a3bf6eb47f98590d07654be8fd8582c5774f89bebf6fb113d75d28afe74443a64af360f41b9d243d8fb865039d924fff4586e3c76d9d0d43f8487200e802adb9e01460eb6ad5538d8549999c4b38c41dcd878b8dbd049b853aaa4426e74226fa19d3d501e6a93aa99dcea681f0044e15a05c2d08ae49f625ffe88181d2c1fe55e91b6f602409fdf961af1da851fff67f1e9"
"c9ac10dd3960f460bb8f937ec415870cb9e99e150f5b2a2308f2136960d199ccf5900f130a3f4610cda347991cf34fe46717071dd5ab2e8dc5bc20757fe6357fa56a18a606b25c51612975f51cad52e5a20a8eb2cefc79732fe19baee7b8c65167e2949a4ddc8d1e262b47c97286c2d0fb7078b3f553453445053d82a865320ead1ff4bf4fea84cfd7ce21e7aee696a15f92da1f3d73c394d47a254247492fec3b6582c94cad0df1b1b097048c9c91bae6aa269f5a074b796bf86770059cc767aa07fcf84010b1686437042d16d693775a03d9832857bdde9f7d98392bbcc579db3bddbc58d8cf08f04064e3eb92d87829e6617efab245cfbb6d564c5fa333ef560d6105c525e39177ff5530dc154b691b1dabf14d0da99229a04ca5c6e7956d474c0ee578b1b287b0a5971506687670ea848820c44875c74e69a79b36eaa3cc2a5a27fd5098f0fd3c190089736a271ecf3f14b3259cab95b941bbebfb5be132d875328a1b0ddeed958e8ea454ef80724f878a2a690bef56fe3ea62f47cfb6db303ae608957dbbd57735195d6b1b2ed73e69d1ac4b4b4fb01c20eddcb29e8b44bbd71fc25515885a56b8b7e55edd4c21d5e8cc43417e94e57cc49f279d0ed740b286d4e27c0b909729c4250ea2d1857f3f7d801a87afcee46f455f8a53e211fa0a311006cdde262ad4bc47941bc52db89c4b454b7075bf29d9cad6c98b7e84318a071789a78d1a83ece7a24cbf17691aec06c5fb7bb8a832c0aa33b27a5b3a68ef36364fd85cbd19e8f75e184c3d1cbccaf7eb"
"c71211506021ce0d38bf8c0885a205d7f4a60f7fbc972c7e2365b07d5a52fe8ae02608c7bfb1650ebdb4f2620f2698f5fc90c7b42a34a31732d2cdd12a4bcae3ce399623211946f74c67c5e82c0f53701bb4460504e17c1d6fa14288a63d97a86068be8ec36670adc16670b5cb3c09972b596cd441e4bb9b50471708bab77691417517e91883df9f0b353c2bea3d0acffe5410097edd2b3886592cc70ccaccbbf64d168637a8a3fff0d143e497e5311a9b13b4adcbe8d2625dd1fcb5ffe9c83ddd4a1cb3046616296faed945fe7b29ab6f912be6959f8768ce28958f2441a1e161147145a1621693b9f2d24fb9c7a89535456dab48dbe15c689709e2af6a6805edf923d8504f3d2cb8220ff9966f854c84e9ff04fbf45e42a5c73df4f719b9ed287695a4a03d5c0a3a964a7b6e95bcfc36a292b23774812e8567a02cb8a5baaf89afb900b3fb7be40c9e8432656307fbf2487c0d1f3baeda11e803f9f298e7e0c478f9fac11a43ca32e2cda46ca6491cc7b31aa1725d24805587722248dc326cf81fea4fc1ba9a58bdce9e34740e3732b96889b36e917cf029c7027c5cc985f8b3f0fa4e504325d56c7e653ce903e8410a6b06a2126b3aae2030404441273c1e486bc8285dc078c1874635e75cdb753a0fa821567e8116179b78039f8cc52675d538fe38a71f46792af445b125dcee671bf7789f2e874b25f05a431ce574a2d85762ceade5e5cfebfa5ff62b1ef5ee155fe418b16638c1562b29be425e05ef0237f03bb42181f55d4370272a13d5fbb353358d"
"a434519cbd0e4fca54f9cad4a7735238098d3984b0cb9360eccfc63b3b4339e0ad2b2719552085d7445681c919f21a6b482402c271e34d7f9fbe4fbad68eaf825c57d22ec0a2c5ddec8c1273131b867a3760626abe779e37ee632f41f212e9a9aaf26fd5cb28df689d9c4875c49db62213faa1e18c35b5d2df1fec21852e7c35d20d6df85ca2a6b10898b244da31dbb6de3a3a8553601c0dabf1e5f4755fc77c1561223cf0b1ee43441c3aa9d855df0831db6a7f6949ff0ae1cdd465aee616b789c268417de07e9c0f0ddae6b07ce5186b3b83ef96fa1ba9fabda1bd79986efa852a348364e33e89458550049522e64491a9b24514665af058b4be4ba690299d3c2379b25ec97575a9312b38d3106f805e829bd77033f4d5f1b35ffc7289c118749b31f17babb56f48aec597049d635c055d056db0434493a379d15010f3325690444e1021abd622d18ea7e0b5d5b97054708ea9087b4721bf857e3504aafec84516feab2a6f6309a506cd3e931ef3ef47807feba8ff0b6dd56eb83349d99be8633675eed19be804c06d4d81b0a256ec95cfbb2b6565d7906537c5adc404713baa8fc2e0f425c577660df47198e91d2eb3ee7a9a5025641aaa759e7e1f3dfd85c83a17a6a59df4af62bc669f28d12544254f4e0527a6b10958664af9378e41aa9f88ef3041ee6880f23a858254b5d0fa7899655e9d06f12fa863b63c2c950a0c3eae774149502f0fa3c3a44d24add7f9426ceaa21dcdc5408f0b96d63dcfd97dc4a3ce03ccd56c8d48ccb253e82d50123e8a51"
"76ae5d1b9cf6b6c11d2decea9f91e9ddfea605eec75391ffc4e01f4988c0ee78ccb3adb8a5e16644eb30e7e76ff251192fb3a8c48a68224a2cfee4aefa616ccbb68abea13d335a4b212b0b9841a42b418cf413fc868a842a26950e11061608a623a5dbd520aaebddfd1a559705e8cadf6abfa272925651f84130223b0056be28b618bfdfb164d2c5db86d82ac0eb2c457198a6cf8b0c2f2560eeac4441df45a9192cdef63a00adee0aafed7e0ab0bbb0c0b9a066f9f45f5e0c6a9376a069a45512081ee3edd2e9679d6c46d71e3740c5ada7457fc5d21610edccc2bef851d18f89e8307105855da15dfa749c44370b8149de48309f99fb5040d05d0739a64cf253855c185550339af73be6d5cc2de3186ff4b004ac816c1f4afcc83ec3ad66740c57b9cf660de7ab97b0771189fae5957751eec58a3aa6d3ec6121bf767d13533ff413c84c1ef47142f51ebf515c3d60a3c5cc3b9eaf9d43d2a84b94ce02db3f254862cf3c6330574fde5f8257c215c416ac3c9833839d5b33436fc12c21046025a4b0be90f18dbf002e001b8541b888835ad138def9910c4546fa0cf496bb4415463cb10004959dc6b0e379c18090bbd1aba6e9588fc21a89778ed1a1c0533049867569691aef6bc310fe4853e9e9bdd94a58943017a197526c70d2d278c66e94aa97abe5af8d9faceb0fd4e102bb69c824a1e4709be2125de420aebb11506bd62ae6b32eb1bb2cbcbc35dda3c992193086b11203775b33dcf4206a976b31222fcfd8b0e6beab7eed02f9f6d0dc2959929e1d"
"30c856a672379ea1a20bdea6e023fb7ada31f6f9e02f354f464b2261879372c0c92ea462ad11a83d54bacfce3febcafe14753d697e905a7c77031beb83076444aebdb99cd1aa470d5774ed91cded7eeccf7fb18860fc39577a054b17aacae86d02c2dabbd3ab068c982cb095d135c11daedd863bf9abafe991656d1f7773cbc05aa66c4c800b5763fe845d06c3b19f4f73dedbcd50ea363aa11e8274d541ab754209fe7fc159e7bbe317f8d9ba602bde8fe02171f8daf608bcd4663eb401c7a3f2cc814bd8fc195cc192d4d6fefbb15b9d9738f5e6ade7826d65b9d8477ef500afe2e40077b6ecd7d3ed78233fe980332a313fb2fe854d6becf9ab4c1008cb1b16a513d3fbed8036ddaaf372e8891c59c6e9bcdaf2d88e22d528b975d1a36af2fa792028a3e1161a74545eab1cd6284079c2353ef1c49e3e1242ea52d22d8c7d64f553e4c396e7d62c4a6619ec698b56cf25cecb6673d8a3a703f65e480f1b8b91e4427e9f1e9dfa1939134d03cb3115167567835d449f50cc9bae06adc68e3211d8e0cc1faa34f7bda6e1cfb088fe980397f4643e89052d2bfeb233ad81c3cd466bca1b1007e2e6459e3aa1e51f1a326a2f5d89407c05946b0dc7741f458464b5e4ceea5e367a2e4f0d007e9e31b24f5b7bf69aecdef4ef57de58719cf9fb5e8f5366452013a5bb69c3f1807d83e26bb63493dc141ab1ae8eeea11c495650b346919de060c4af1a80823fb10b4cbc333b9d6d05c6a4c293a7fd524c5259a841500617ee442222ef2cfc71a0e4bffa87903ff5"
"31898a44452ca2b132c4a633c91c7a24bbc885a01001988ab845e53a350c3b283dda71360c7a9b47ae40f72737ab6be068ed8ecbde1d0bcaecb729c5bea691ba0de6867e6e6879fdd99efec2b6de4c2691ec9031189491a01329fafb2f0d0cc28e26a22bf55be6ca866dd4a473153901f244c63967e829d9ae2ed83451a365558b697055a3b9a6bcb1bb40ae56f13d4b60defeb1a06cc6831e175ccbdb92a34462e786ea28e2ff25b813b63b30ea3b8d9a0921a5a5bf45576b39fbab6071fb1412670c936b5fc31d668026d297c5b84739021c4e763686e4011a2bb7e109db8e1d6bc853235a44ddd93f1012f7168ba3091a2a92a3e05bbc761fd97ebfa22265e6c1c2bccaa9d327d4ad61de87d3b5f0c5b29e604f79827064e05eede8b574c8982bcc0439db27b15bd7ea9a38923a1982fa7063f9f1572963c75168d53756803f6f60604ab33388ccc1294fb0ea143fa5e128a060da40f4dfa0382906b878a602c568f3c99809cf1d5912f224b2adfdcdda84df149217bf8edae18fb4bd825900ddc57ecca2eb7d209ac44e06e674c2b7c126756bdbad066dcf187344824050b16ff9414fe957c37a048c3a260a8dea72f7a12bf5b35e1c2205866bdf85367d94af939bf52a3027e2c560ca096a449b7297687bee98e4cc56e1449448461d028e435fef26f060097cd96bd605d5a1cf6b1cc95c49037401878b85d437ee43bcfbd7b2b8c145c05a33fe01226a637dd677bfd28c8acebc4a30494917c253957462cdd5a3d200e350f5d92c5c57bbbc7b2392e4"
"569610f35e3707aae8a481b8500dc8dcfac689a018671a0f3634d18fc7bf4f7c58933da452308e348a446ade0bdd6f02d29cd8d273544ba46f1767873717fea45f0e0980339fc187acb7045612e95db5dd9c89169daccfef2e3a01c4d19984f8b1cc960d054285119f23e746d743a0db459bdd5803fcdbfe92137e80d47c84c547848ae563695cbf113253b8a96e368bdacf59ff73c023d043348c1dfaf143ed13424662c2da644c25b9d22598813e1973f30ab103c0ada9ed247ca038a056d18f2e7c8443fd2c95366b387e9ab972170cd2b4438455dc73619ab3444da0d64b0b2d3a9d640ea917b1c09d17c37fd587eedab367235e1748dad753e4cbc74dd53017ba65571a5a65269666df0a24bc694a2d24e862830e7808ea8ffc1fd6cf4b29564c8d77d9692d7fd55e496c69f5f17fe145abc0dd1818f2cf6eb979c33eaf41050901dbbe5a49c8bf9983b1284fce92703b45c4131b3204fb9edd58b6cda3918cc490051bf9d6751b7702e577b700230f1820238b959e46f7dc3a3abad842814c69a76be5376c1e7b35e3ad7318b3439008e4c3801bd6754fe67cc7aed658d89550a30cbb1193eb5d2144eb7f84c5c6ee9e13947daa3534ad4902ceb9cedcae471547bf95e2337760322b55af97457d23d174b1c6f3e1d3585feb000953e298e35aeb467e90342bc61bd05af59c72921b2fd4795c19bba268bc6bf4f18349ca91b89cbd6814a62dffd4684ab78e998f7e3833b51ffc495ca3e789e685417a0d972bf4192b0c50016a64ba839da14c3c5bdd"
"58a74e96e56c66d73e2869323093892c5272aba5e6edff5a8976c5e04976c8bc1b8cefa630cd924b5bc7d28dbc67b8aac4d7571623c4d412acbfdf61603d2cdf1bed6fdcf8d88519a3ce3c4803317587c4a7dd33147f66aad06554d69138959fc3172298be9f5f83748b83c6618758bb45058fab1bbc1434b993890288a42910b91bd52ac1abe775acb09cf7173ff9fdf0e644ee94b000c8ac5cbce24d424800a9df431e03c650b3f4196115f100b49b7a41f68ce27e5dab5865b40a0977cc1be995d3504dd3bfcdc8db2a57765b1a80f6cdac0db795336bc9ffa4cc163df1d9d6e034d5b246cf59ffb2f81ec02ad4c48eb652be03c97a11427ab519d8fc8d704fea98d597e44cfeb168f3fc1385f1a1dc5926dfda78be4c3a3e1d024e4492e952cc8471ae1f26150cc065bef433c0431128c7df6c57bd79dbd409fb0684137465ec0687ec2ec45c6fb76eb88bb7bfb4df3fe69421dc7e0809e2474f987a59980fdd92b2a66ee31fb9560b4657a112ae523caec636642e44b507ed5a900fd65e29d35c89d252708b7f2c2daa29062b94577b0406ab9cda76c921694998192078e2ba7a90386e1544444c228db678f9c7da51a06b9c0a22ea26ebd3dbd8880a6e981decba2f659ddfcd15af8d06031e2d8ddc587417ab536fd4cef49372e0510c58060f2900e030fc894f1edb6aea502b0e2642a8cb1e0d22cc11a43cfe8eda906711e059d6e4a55959cc337dd54428eec2c123f5cfe185a78f442266f54213537af2f4b42176951bd9b0d1b70c61ef5e728acd"
"1a5b0c8f0360fc3d4106d1f1a6a100326500e25cf6ce2c7f230e5e54526c3affad6bba78eb0a275ef942e441919384b0420571655eff68e32cd97a322e22765fe736eaf329f41b2ea005ad56acb4c092b7bcdbf2bf3e54b058827259bac8bd94ea73e1d61cba79deb078857c63e255da3b8ed4bf5d4f603d8e3e19813fbe997afbd272102aef06950ab6daab60139fae51f0fa8b48f3e056a360f074692f982aac57ac3472539e7484862997ed283dda8be4b22b83235299d1b20df4ccbf0fa24faf392a8433535d3f3cc3ad7453b9b150dae24b8c78f149b53f5394af065082540b46f6ec3e70e2428b873fa564b548cc1e39fb406ff897662ac7e901384b3094c328bd484980c120518a8504511644b0616215df50ce1ab6106762d52ef24d40b9851168c69b3068682525f1050fa3ae139c9500f89d1b5a96c35f71e25f8ac229518a79fbdbfafcd67d7356bfc3e9699f0e5a8c9fceb068f810cf2c8e3042b5fef34778a3edcda569dde4fbc240996038e50e233652eb5f303fca7f8f29c633684566f6548bbc311bd24d7e0ba95da8f02917048d9777e5f142f83cce4187ec1af72b6b6c3825e38646f9f29697f6fe3b3cd76*0", "password#"},
/* test vectors from CMIYC 2012 */
{"$dmg$2*20*dc39029a22b86bb4f930499578d0dc9eee69398e*32*bb47bff69b10ae67000000000000000000000000000000000000000000000000*48*c4559cada09552ab075e73dbefa4aea1aa21209011946e423ca707753a91c87f6c4cbed3beae20a244d33568f852068a*6*4315*504c0c37c600618fd54da114fc0eb24d6f24585568543126ac56c034cd8d7b3dd991f1418d0c95791e091921c02bf695b7835f7b0da2c1b96524e72b4bd3f671c592aa176b6a58de77a35a26bd1d0c313b2ca23581027fc52c7c63f37439404218d720171d3b178125e6ce0646bd6fa1033f2ab7b6849b3a35a430cbd1401f73b5deb478d6d0f58364579c208c613cb2349fb19adaf98be2d4a74a6030215793fe4f1129189626bb87c23d26dc2af51a98e1fabf2f58e106271c7759d104b9e5171d8f952ceeb14317614b7a14a5313029aa4068b898f7e0f5b68683feff0d375f2ada37f20135df443bae913c7e96a29c6c3388b4b51432add89ee22826ad0b1b0a4ca9233e691f71a5ae2c76b5e5a135dc793e081dc53781faa4f844928db94084b53b39f1820c8342b563e3f46b002bc52ced63e4588388e69c9e85e2002438a1a703de411717d24ea88adef3051b27def61e4b9a31548d3714c3bee39fed866254033a123429043d0c08a052d2999a171b010ffd119f90bf9222462508ac914e0a68daf93f63caaa0c4302c9b1f6447ac3856b09eb45096b3a294731f110b90826b0d611e6e045397b07e5aa64afd271f1c92664e648af648642f786c0c8aae"
"6218f4282d8efa713dce232fb24df4073a0e04edc86d940e8ad22db8ca751143743f9f12585bd788551cc7b70821b5c42b133cb7781f60d1b9c345e9adb122ae444be456b8e49f9bab0e2033019b52f2ede4e7f56cc1d1dc3a48bf0666cc7a4dc6b4ffd5077673f2f6761688e4452a4c11b82598cc0ef57213f6c7c12ecc67164ae501b3e87e25a361d0615e48cde249f0193f2aa69a1eccf029340531becdee8eefbddca18905451b48c1085d4cb965786d3892d7144841300b8d2722e92af50fb828cdd8e825dbfb16328f7cf792f311f84078d45306fa570661e1ef2b34d5d36de2fc4b295f5e84fae8d55ca22bc15764932d0c5dd3cfd914b2b8f67477b2b5139c822ee2c511a03f7e9c717a5e8eca6c4b54f9c3b7d85765a78f03b29fb979811ff0c655522b341bb54ae3bc412eb760eb689c6b4c3bfb85a8ce794946214c574105e577acc01d3f8885e72db52075d05a75260a6e4a54872d087040ff38f8942cf150c3615088588cc53fed11040bed573c0e9ab14b987f9223ad089bb73284443f61ffdd61616b8a783e85618217e8bb491a31b7050421f4b0a0bfa5003775933db00e47e4452adc1433da2603f6dc5b9dfe58efe458da25699e512660ac6f1129dd9d7b176a24109c6e6e0c201d784addc9c7f8d4f309ef6fcfb02493abb7c836ba3a371e64fea941031a59adbcd4ef59f0dbf31f361f4282a0e60ced4d9d17675b0422faa1c2f932cb525ee07df7eb2643a67963aa99daf5b119884557ef1585d81eac5c8acf32438636a10d043bf"
"47093fb53a5b3ad544a38fbc3588bea3ed616167a79b2133efd8c509f53626b9cd7b71828fbd5d61b1df6ef3713b5347f65e7c0770715ac1fae561cc548864f9cfe281c6e5770f053f68ace64702c81c97976f471ad11c7551789ca21a4d5480c5d3528503f2f7fcb268c34498888d5fd3edf1c71d12581c393db2ff863e22c1f6c037106e5928aac9118702b45bd36782b2295782f93458dc120e79cb3d1632c2c5e527e56060b79a751cb7653b8c0ed2acc32168b56fe5b50ff9e49a71dc9b82f812b53e095660cd7d59c04f31ee47773a04eabccd7a4a6455ebc7d719c9eaedc4e6c935fc99642acd3e60e0f564efae90d7d1308d6ddfe7eb89520c234cafca6bc7e8ac96ed401bf96e3c9de704ad124b0f9381f22d9ce846fad0b14eeb5f93eb0e0fd0657c480fd2a1109d735f3825db598e2aa7e624f282673947c38aee8832ec8d4dc5d6a7306e3477ab4e37588788109a3ed76741f8f2a796d0f5bef8247eb298fb973c4e5d13666d87b0bf5a7a553f208050dd7140f64fcc27793ea82cf58fd86ddf805a700065888bbf6b5037815afe8c03eaea355c90bbbb448de13773e977fa4c6f06e7695e80882cdac40301b537fe254eb1ee437a6ccf3efa68899a7188e6829b58977917a9d6124cd2af7cfa567fb85aac9c6b971423681a0b6658575ea0dd32054800e08be5683faf46165c56647e1c346961608bdd8e6f999eb033caf73f000a71961cf2fa8c319f4084c0ab499caab87d13aca3f057d17748522f08b36c56c1746e49d731f9355100879"
"d7d114000293520c9ce71098d26b2114030615aeedabd5a6f7fb9a91f98b7ff00ec72c82136a00e5a19384084e0aebc78bb3cf05c3c1e3872f56e254c68694d930eeb46ca8e99329eb923ee0f1b5af0b7276e8600e25f18642247111eca41da427e5b9034a6a22627734ee024c2e2c4277edcb3a0309c3007c19416fa131086eccc6f73784e1a008dba5166e7c8aa4cf8efc3a4e14f59d665800982e46341b9b098508510c7dadde295a784f7a7085f5ddab5b6881b305f99d87ce3883e557280bf2a1f3adc69b7cc9d4f339623d21d569230e57a2bce611de7495d403adf451725d7ef11df4bde5a31a95bdda0d0c2a7869ddeedf2ca7e1986ef430ed44bff6ae6e44f740b2c65364477ade4dff6f4eacbffc67a2e0494c81e0424bc9220bf20aa795e2b20db6076667088b6863243ccd2bf897d4b6e1e58e2662cac593fb9a86220d65964e7f6e0f1987d07a4a8242c41c001ec38ed2442011d8a56919800b4d590338eb8db02833031ed0422bc08b11dd59b59f1d301e82154803076053464120217ca64bacc02465cdf629732cf709777452e177f4a4d1015fec4c36337ebdb8daf57f19bfeb247a27131ec5280038f3d1a766e071470ffb685cf4d9763b7e1b5776589874f3cbd4761d5fd35638918ad144a4a1bcedab9d652477951a716e4073cb36640fc257031f06e4d6f586a9a0b6172727933179e4cd433ba940571f3eb908535a12e9cc3ec1e8f8aa9975bc17241779d972a8fd8581dd3850905cec48061dd5fff1b295757e38ed8568c3a2967"
"ba271e00fb507b10bdd5ac5b90426e48e596ed430b5a3c554ca1cd0d18a90809d8db18853e2580cf2b2ca52ff686b7cf360799bf69c008f87191ee372b44f96696a12632af003eba51adf1e6101628168b92c718c6f7aecb765125880f180047ec3b89fa23bf57e4fabbce38ef0fcba829123f0a3ff527dad6d6b5b0c4b0c4c4cd13787e98c829bec08728acc5e90ddc6bcfe2254eb29ae8450ae87841a39958ab80a38c8a742de64a44e25df0360a9e8672148347d7812bdfcd9037723edbc5fb4a8bba689dfe3baf113778a498e2689e8cf1ad194df422838a618b0cb222aaf020705fcfe1475a8c205690379cbe2d0b5f9a0de41a4d2e6ff85f1f19a97712bdbf49bb90051ab934407bdda9bdbc1a57b0e874f3b2a09df45b7d01bda15330ccc57a752deb2751e495e394471f09f33d98d8face401d418affeeab86be36cd8cfb0f435d9939822041f256ad860733ccf137e582e1cfb5a8b96ffe646d1928657c05c67b8589a90fb32e078697fdf8a3ec58dc6d350a7f50c83d09e5884317829d8e850b7fe17bd2ba4d7fd94b86d060a3a97880fb350b95cde4542cb7d1a2f44f8ea065ae30fd4d4b5fb24f787b8462115b3a918155bae098f0fd7ae2d4646d3731d228909f690cf0116e1ac15899513957834e0a74d8c07f0c696cd3268d631ce1292f66b2633a3287a7e058781aef9d3d566e4e41395fa7e1793aa9f669aff116b99660a5a29fe127a0459eacc3fefa4be95a13499dc844d9faf72dca38d8032932084faca23e4022869f2034ace2de0"
"b286e71f2b569951214fd2eaa3d32da48a234265acec4967c74976b5b5d635eb12cff038a4a23d6c8e86a11a408aee5eedfa7209a8ce8d6bc10271e4b5627e16c5f8ce8000882c461de0113efd8ae9cec6ac4819ab2d6f8a9f189fa2929807fb20a895204edad9821d180c54e865548f9b3eafd8073a734e61d574923f0d1f69d266d970102434b0bab705465833ec9926b03798fa8a95ab98d35863b7490db07fa1abd600abcc3718d105f26f96d20e593ce0c82efc68ae65d03e4e2ed3faed27bc5799e359588fa884ac79c1ad4f5f8bcbc9a2a5605f97551710e2e416aacf149941265406490d32cc6bdde994943fac2102e57785dca3c20358cd431cee285768d9eed6ed32a9919e13f1a38304db6a57f637b6a5c8adf4e829baa82ce674ec7444fd9f7f1807b8f65d4b68ef7b6c3fe5bf653e81525f7900916f5d5809a52c070256e6b4cb332fced5e460c9a2f62bd73392bdf4522be7c211577559f59f62869e0a71f832ff493fab76bbe70f3c0b902fdf45cf49793afdb87558f1a6ec289018035d861990eca1dbfc412492cf86503af00c7db7a0a2c6374eed42b440293938a36f61e1c4c187cd50d974f2a0989b05b8ee207398560b516aea520044e37229fe0efa8b7038441fd584d79c010c0f31030d60eaa4dc1fbdb5a254c089198bb5eba6fe20655808c1d22b9604af1247e2b820823b3c622be2b01ca5f16f86af880908ace8765520c813afefef18e2c112a72fcd4760da91f7d1066cb5c8c902745b83be8defa193bc8b6b93a82efdf17"
"13a223660c6ff4dbbbaccb1a4e5482cc238388448e8b9c24c9aa3acac9467e1f6d96d6deb1cbc9fbbf77b7e756068e22bc3b9e6c275987c5eb99da6a5e2d90a1e0558c4f9fc392371c07a7844cb947b19dd1a6d9c1ebb6496f36bdce2967bea2971cc1c6330b1c31054c07f8d853858a46ae9370ff1d6ab755beb120a61b4774fba521baec6fe8a079862a0471cdc5080c0f073f7e3d33f0f25978d098f61bcb4905c776ce6c0562dfe08d8b9f17de4bc2048d962ad7f4baf132cd0152a904fea9530e7c1f52a85c0188d6ca38ff9b692b2a68204a6dfbfbec06f2d800b4444503bf2dde736be4108845c5a28909cdb42391b5a0207c157003b8dbd4e43996ab5017c5f21cf0d4d9b3145c0cb70fefa767b4689cb750fa7657c4a788b7759f86496998fd4b99b2ad1b2918bf330c1a81e8986eab031e9f86cd93b7d623c72e1a394f0862a193f21eeb858524477c3192fdf5b61ce9dd5b0bf3b3d7adbfa828f1a9ecd4dabf5e318fc40262f0dd204f28b934d1af7b0d7cbcc20be21f1c7e04fdf76104767892404b14965bf8d53003ca9ff0a8f15f5d9b2e152a662ddd8eaf7902854d8561ff088fe2e880a18a036d06c29997dddbfaba32ae4ed70b47413c2a037122d830d55bfde89ba645562cfa1d29f428da108d93562bd291748a728d1b3090b8a7f56293a3135f05d6876021e92aeede437dc7ab610e1e5af0a00c880887754d76b42b059f32f9159d25ffc56a993661d06a7973d190fd10c4ac998c8627b494444389c529e41982726f47135212b67"
"8b69ff36ad29e225856ad2081bd393249f469648e6ea4445e0011adfe320b4eb5cff1d9332c1779edae5d5d66931015e793f730be8482b5f488ca6372edfc71abc4b8aeaecf8051bbcc848d736eb0aa0d7ee4cdb9eaddfdcd4200c3e2f58a97a162565409abc44b8e982fb883b619fa80c7c4f2318954767ea1c63c70124f4342118f2c798adaa7ab5f6ebed1b0a15e12f40978ca8e5f0972a47cf397746f9f482902abdda10ee7f4c610935070f888b5ef8eeb07933e1d6ecaba243fb475b4c788cf8b453638ac43b9f6eb74654835678b47d9437a14300a12553fdb10daff3690e0802dab80fbffc401422a465e10e6414975358249d68e4ad5a1f1c93e295bc10b8c5c11ed98c7ca5773014a2739c0592dfa30d8756be1f66e4fcc01beb2dd58d87800e71d136c12b8f73298cd37b1bb5758376b2111921fa9f7040e69d3620415ace96ebf29fc1a87e392a9e701f4075208a1a8fda7a59b28997c017da70c18d2bbb5c91db86d701cae85a5742842fafec723be9d93b4225619c7188f5bd23c900ef3863068785363ab861b58aab8e91b562b26f72a812e7892ca0bb6ed91086a2935ba82938b367b34f70cbe40c02a8cea92a78588f90cddcabd2738c9a18450f6d3a87c7f827a1773c2c7629452f64e1528258a8ba75bc53245c705246963369f1179a765bed41d*0", "654321"},
{"$dmg$2*20*0e2a3f19e5f9a89ef8371580fc08738b0dd02ee9*32*57b5e138dcba821a000000000000000000000000000000000000000000000000*48*4a33cb05d5fc441fe39477724556bf2a3445d2826dab91031374075f9b5cda25084769a7af11b2e678d79514be8e5f63*2726*8192*585b8129cddff9f9f5875d62364faf4dccb0625867ebf2cf7ebe08913e340c8bc5b62e4c4152b2274a19c3fb7d0f6ee32e7b6c502073785bbc213c28890b9910c878702b2e16ea0c0b0ed1462b831b1eb02a0a5ef586de3e1bb7b5f70b64e713f2bfe7f401ccf0a4430981b89d23afd47d05d1d28d64917ad2895af8264350f306b7a0b67029f6da75fc60137b99131d3678cb8c596295bef4eee92110d09c52cb30486709fff75b80753378918af4db98e69905245ec52c2c6ce7e71ea62b6e530269af23836fb40cbe12a1498d3d4e66ac26b04c31d4a1cc169909f51c0468edd44d051d79c361f547d7f4891195b96950ebff98f70b36106772abb775308cd6d42fae3a60d748330dadf7ca90bd474d05cdc678a0cf41a5f4461285ce0ef0a6df3a400d0116d1d1f17cd10be2c8f164ffbc3797dc022ffe52b69f0303526d3a17c113a56e67e54b4de121787dc62977af8bcde3f4fb596762ce31460a6f97d3d07874ad42f97ace146ada9b63f579a411fca985d85d64bd3262d1d2ab5721119b0cf8348abacf7aae2f57d3b667a5997d0fa448d3da4c51a6f59c6686a92a35ff4d6d951dc74acab9d956e9a942d9356291f56046c612ff09d1e10d8a0c60"
"bb2a4d273b03962f5399ff455ef480018dff09125f6c343f28b13acdbe7f0309e64406d2c453d57d6e78f10caf01d8dd274e0ca6e4a82a208750de92640ef97f67dddf90b0c6de767f185b6bf17a119a735cc97075b93fceeda807d0ec20bb4ed923ed8855202d7d285b767727bb5db55241cd21cd5a7353cc872f0d4a00fa0a50608eeb4cfbda71109a4a2ae97f2c01a40c4968c32ff2c01f05ee768b2ab22f12697805396916d8fbc1b06eeb320d619b0e472b763e7a72acd949e17620f69839543c3852c83e5c3b1cbdcfcfe0e3507a4fecfaf3f27118b6738ae8e33801cb1a2b4168f8f614dea5e673878964d6e27a1d8d8aede3bcf366400cd0155cf502cbc04234a2a418638531ef13c48917328d2bc1736e85be9cd80cf0d99b98d0baf9dd9bb3f840fd15d74788043be9f791540248b5dea621487810371995e5fff578de770699ed8de1f5190cfcd5d47320594299af29efaf204e0a411670c6f4f60652422a7e25ded5fcf26c1d83f805938c1ae578bcab6ea5c679939e5fc6593248d6b8fd55c454d2c69e8c756982c01ff76b4911ab494d90df56d7743f4d8017423a045eb4215963317164bdbb473620e8a17507a9cf26749c6141ab7b94af974db92c875ecfc4ba4421a37da4454867ea3f7d8580185eed9ae3271050d039c25f7b72e18024f91edbf3e1bba71f697c8451302b1ba97c8463b3699754fabf472ac399bd3a783b51cc945051ba1b411ea8093278606efe2b34b3992033fb773fc42cef45fb0482992d5f867416faac3912b82"
"eaa852935b54c1c05d2b5be854fa75ee754235ff1e84a53564070de838fbea7704fc249a98c7fd8a4d4ffdc06d5fc0ca39071fc5be83b0e37591e14ee76379f4c5ac64b21f016517ac44a12161543c43d40a8f92237c99de44ec220fdb502d82e96f01f020eef2752279a5aa3d3928a4cb594c5e145d016375e3d7a89d2bf12d4daf3886393c31615fef9e4201cc0208821e932e8b26df396e7c29f2c0b74c9f59ab79fa44b4f9c1156741e3da93df51bb23b756657187f1902f3d5c79aed88190b4a5f814ee1010b2fe82a3edd867457dbbf0598566d80261f83db810d058e785261635cfd1260c6b3b43081deedbf0b2a30d801618090d07340a6ad528b73c7d652efdc48fed161b0a0529d5d1e80fb0a63411d53e75e9ea9873d25a3bcb243faa406293f53a21b37e80023a302682943a30c8f1a5804a3700fb92092677602c39235246f359503cb79d2e084cccd2b40840acc7ac7b18b4e1a665e3833f5b4aefb40f0b36b70dd6b125ac9999d113fed15e5cdcb6ea6043036df3dec7f5638379971758e50f1453af5e48ecddf1d46e575cd2cde1b2091c1797df41f152fa77621f69169d42398312155caa88850800f9a8792c364021463467248e385bf45cd40c7869efcd6e9a24152bcfc8370ae901c7757a19627573a8832e5ea62c344fcd60230a3915561b6fd957750af61ced54ca1ff1a8edfe5ebbad51a79777ebd4e66c63a248687220e66d923c746f56f009f9d3f1f186d987c057af87f7a70a213c9c6eb93867983c3191ee956c8991275c5"
"5b07b2ef0eccb8b0287414a154afaca67f218ca43924fffe6e6161690756e3d6a19a29ca972987f603727397e5f4fa19d0c3f1e74f026d35c028bb81450c7b5493a7d837e83504ae7369a49b2354c6c6219c79ad8cf9f5bda3765541d9691b84d19cf1fb9534f859b58257e80a7548c12ca2c0fa34b8b6248b30213be0eb60de5bd04621c163e4ab00d80adec931ee00288fb98e5eaa8f6ec83af863b8a3634f955b54aff779725479d80f2fa51d25e721b159a3dd814db70836a32b3a4e55c4def271a1918805f31fd3af464c01006560b36e1ce0a745d3bb121710083101d1ee469b971400d49483b6c4d858cee24614786f227f320fe6105d61fa8cf21136e9160770167e1b7451a3d9171f56bc436f097d73dd4c21c245efd72b63fe21d1600213ab4f2250e6c5a16cfd3823de93c9c56ced668faddb77d60f4d4d9a9a3b3cb9de0eb5694410fb760b7421cbf6e40ca4e8bfd4577fc3528e0162ea4c9aef069b3e4f199120a10209a6acb1eb6e39fbb23896860eb1366c6eef023c2bd63edcf73aac6094d25cf3c1cb0caf82b1010503fc8e09bc537e8e690f8bbc0ef492f848f77442cbf28bdb42aa8932109ccefbd2ad6563fd3d315cb79a0a5f04772105e8564e01c1e22f1c2ab98813979da0a08ee8812acc1c18097b8f1fd95424ec0d1b63a85e84257d382400c5f44f570382ae8128fc0935a5f7f518ae3808b79ae7aed4990edd9257ccc74dd19adcde363d4c7e5a4594e3d3ce88d308cbb48fe26edad968cd54cb715e460c7b421f6debe9c70"
"3bd684a52b6b9571a7cde4568d7656e9bbfc5559d2c60e11054cba9eb54120bdf13c4c5103fc777033014404d6b4a65ea0a716f76a1433ecb904e9ac28b0bb8ab5c5b0216f62c18aa29b685cbe1c9172d51bdef81e7ead1ebb5d6c7cb078fd32cd63c72b163d2848de4c6dd59b35e853d6ec578b681af969941c16692c9010576f6f3777a24e87084c4b78a8502d083c137237a60705080aa90b2441e2f01ef9eef5b0f2b25b2b745136cb143405fe5c7ca013f88392428868bd9f06bbe41872c4cb1f98b16d74d064e66b0c435b52913b8153d47f52fd95ee73ab1f25f1533febb72e9dbf65d11a7568a17d2e8ea2616019297846551c6a3248b0a23e91ac1f38b21878a28f828e8aeb19893478aa2ff2f16833d1b69fbffe68b569afdd1980cdf6d8d4ff52d9e2708568db1a1b50847c8310e4d85dc73b59ee31a63bc894712f2d2214973c2741f4db4f3ca9a337e1f6c4ed3858370626b62e975a85e94b498f8c3c2073e6d6fbedb40e8a356e6d6c77c2b5e13ee52fafab4c8d369ce17a5c40deb98c98b60f433889e092d7da5e7e991b73c15127364d70a879b16ae774d65834fd0029c3a1239143b6398bb19ecda0328f39f39ade7a090b2c5c4e75e4922c50f858195c7fad64e4305d04dea5b85d4dd5a52ac4e60681c2337d3a2eb0b47745563f69352e1c17b08a3625f7ba530dc5a393238b6a2b92bebe6b94966537763ef66179b5c622ac068acfaf796ed4f4214d7fbb36eba5c9216cd5ee1d42132c459042063c71a1323eaacca0a94dc119145"
"cef90f744d16226d7168dc9abf46551dbe25ce179e85bd44cf15374ee498f3f3f8fb5800c6cbfc427a834e3f7b3b6b6c7333c5ed46eb2a0c93e4eaaa6f95072221d7cc27d36ad53fd5fee1e65d91e37957a9d34901602d5f49799db3cb4e47e2c5bcfe36008ff0fbf166d9e541504aeed187251b80cc72804687f58b646ca3893e8c9e4340c9580a2008d268e07f7a0705bf062c6b1ebb3a62a4c961ad2f65ec9d44c67ad3a39117d2427d9c3d067df7c089bbc905b319b30d61d099265de1ff42a97540bd08a1ec79a4cef4f692bbe54ca6f95d6ecb82d3ad2316d6cfaf9a66a8b5e5f00847b55509cdd344ccc3fc640da87be6cd4ad8ab3e510b31831d3151b2aea6675c97767076360bcfe1b317c3786dca2e4b3e90818064abb319cca7bae051390063bc6a0a0a133187a60a6eb82162a5061fba5fe17f157e9e589ad83d2f1760f4055879445b0934c954622476c29c9c577c053c723786c8d25829db7a896c66eec594a6b798ed278a824550795b0904e154fc06ce8783a773a8919b624dab70f92000b832475b77db27d0b5bbc5578765adaeac6f61166094fe11603f37a41fa047156f2e57d80a47d110901d96e33b5247a587552e37b7a0712cec420a5680ee8e5550ce5d0996b235b8898d67126415184bc9a0ec172d9f78f595182400c010d905fa73b5a6fef2f722b7f9dc51b9d21d85ec554c9f32612fcdd89577c47b3cb5203132e76ed5a39af7e9cfa2c92369464e14f8333fc29fe7a662b9373011f0d4627c9ba7b0ab0c050d0e67c625c"
"dc83a0e244dcfc7f5b58ceb0d1ca2f16349ad8b16a48dbbd63da41eb5d0732a13ce5a7ee7c9088739eec6d63e0a410fb53f83cc75915c0b6353a75fd2d219986ee35bd3991161fd054f0d39c2c9da696ec2968e801cfe726cd512ddcb6cc28af65b1f8e542d1ad6a6d76dd1582dda6af4f6c9363ad7117e0ea0102cffc1ba0d94dd8abdb5ac37ef9b444387bfac2b811479086e550ce3452f77461febec72ce35d06ec70b94779b794dab1a3fba727f364bd0a65e7255da20d77ac6b85ffee926a1c3c635366a4d5c8233b798e565752103c66d5e7f18f315f7fe2641dec5944e51e373f19fbe1b34dd00f4604a4f741a5d4a8c720bf4e51511fb3316951ea63c3129c4f6242a9014a78a050e633ea5bf85960fe340c54043d9bffb969f8abe458a8c9dd02e9416e0f3504a5bdbf6cd0b4013b4b548bbe59a23149a24296e0c326d69affa61a878baff7525bea12a4bacaee6c216de31e22e218a3bffc996eb7a3b8570caa06193b56452ab7f3430c758c3b447db98c7a1faeafffa497d938d9b952e3ab3f6774333a02742375e7e1dc39cee15313d69e8cad1a251274ecf48f273cb79c58aac657adc8d77f7cd1755ad9a2fd43b69cad9d2f8bd77695dac3c43d2469e4ab34e26c7debaf33eb2ca6cb7fd0a963a37b7dfd5304b9d5f0bc1ae0940bb40375001e9920d4956f4011f4f1263c3b7cb38afa1d8f7c8c188bd226ac3e23867f3989d76a402a9476756e03c6c3bc4e3ce78095125ee11e7b47347bab7a638b0088a3b18f23abae9ab2f94650a30e2"
"9abdbba8ae9d9d03cf5b12ab23f5a6464547bb7078b91f533ea06541941483359a8562e709608e0c5d1da2c7206c5af49be0df87a3244903293bbcc121fd2e20ff909a90ed836f1822ee2b40530084f02bd9c42b350a4703851d197d9c465485112f1bbb21aff46daef510159a1f354e5fb7b11508a3ffe12577b40d3bc16631f8a79191745fe828303cbe5b6d9578cd80f736971e1f108f02039e0bbcc12b42e8860cea15cc18505c3e4242ef481930f3e2c4b64ccedb5b4d9837461efc7c48f8b1a6dae1041e696b99fd8c9108ac1fa9d975b4d5a740c4e5bab92004b7c91cb64e80a67aff2596c919b73d88943538e0996a775b88857187e9f97828f8661f89252cd0c5577b27151b5b0021f17937a9abbfd8ac3946fec79a4063af00802d54eb08461f951cdbcec92f593eeba457f381a7a98f313ba28d21d2574fc751449e1c3b497e09b90f8e1840e7a56159915d98b36647dcc15e1b335102074741f1dba46f0df9e7114ca29d02a7e4581fc45c48e6b31cb291760a05774fdfdc0448abe313ca496bd2d1f011f4706072d69eb0207b0289f5dbe4d1f73355b206ab3d5c777d1d9dd65281a0dcdf598569109e8fc3b56af94e4340929457d2c45d9a9bbc37741dc031136a11955a465e0baea8c11c06ae9321dedadc498570efc3191e67354f0cae6a763e84aaf74597dc1d329c81231546df2fd965d2ce0fa2026e0ca896d48bf8cff97e9e1fc5e035a13a1dce07810a9e87c21988d7e9bf19dd68379f346d232f83d776c36791ed1ede88f8bdc1b"
"62e3e7857fddb802ef7771be6a2428b7bb7e419cd95042d7de60359365efec7397b4d7fd32a4d7e8b924930606e7adc49333809812635939f79a20eae6066fc494ad27aa5be989663ed12f9f1c82d092b7a4af546f6dd33ab862fe21cc45c2c7c58842360070e206ac341c26ef2f92cc7629d873a219ea1177ac6354e7192f4c3f3aedb580c322e1644c92b9882a96addd01a35371c07b6cd3d7e4e38d089559ee41bdaeaf81650dc263a69fffa6d2713d3a8ffcadde7601cd2a87c23187463d3f3305a36ea01743d2cd846cc5ac96c89241c86b3c38ab97f1ab7b9685e68260fc116b7d02db8cff929b871dc02379d203aea4160c6302a7bad3379ce2b77effb3f9eb37d7826181ac8f606e67026fac0f43e39c72a04a6278f89d16a6c14c6d6e3dab80e9089a83c7a370726fffd0a2e6a9a6a950fad60982eb28b638ebf2315932911b91e465f076e97aacad4c6e19ec46a8ba9e7a19fca03b7796cd6d8efe6d2fbbb96b3fd3f85d4622fef029819efb34abc28143faf10ba4879fa69d493908649f03853ea84bf7d5bb21c6c541edf0c0aa96347b4102cde3c27a58ba0788ac02cdba243a3f52e0ce4d682d41d432e632635cdce5be1542b6b6a8708e144a6acf80ab3ff5842ca2db90e9d75401cfc99746a0919ed81983d2171b4093b1b07e5e5c45992f657c892e91c16cc6017a66af6466ade21f4b378a6fea6a8e4bf000ee986bbc0a170467548e7f6e797381ee89fc431f7aa562110555dfa5c275523c202744541d51701d70a8f3006ddbdfa5f72"
"9563bc0234d0b2759efb747633221706cfe73d47743ce6e6077943ef6d0801729e1301ff9bbf37f50667909f1cdc70f95040c841106ce566de5dded0fa485ea539978a88ca8618e566e9da4f2e215d544ee62accbe75dc17ea26962d78bcad516e6bff3152642e346444db494a909478bf6d80aec53f3ffb3311c6283711eb96fdbdd8e6d94c71cbfb9d7ddc7f092df5092199dfd822b98e21239bb8dd17f0c101909bd38d309bb5456232f5a1b731990a4cce847394fc40b859a8d89c7c02c388e7d6ad42bcf4818de33d696ed6d6ace4c23d51fc9d7d82d0602dbea094aa2db51d9aa8ef5c1f4803e40f6f5fae44da3c3c6ce9b1003d95300871353762062d1ad49a31cae73d569bf07d147a0c8d212e60b1be486df08bc353a2e3ca7337b83e3db43be03147114c229fd32fc2eea5f64d5d5d9848709ad7335dab3909c1232d93e76eac218e7e0497ad5b7b1ca8d9ad5447879b20dd370398eb8ce4bc6805064ccdaa6d8ed1e98e259b7654a75848705dbf2c3804b455a9e3dd2890f8d74f0e968dd050ee81af2f98fdfbe831c16dae6589b9b2a16965713b8fa52e5d2d4df504411ad9c14929e560a5f7e74e98d72f71223a5eee41a40d85c177183c510881950bebd3f0ac907fbc5a4efe70a60da6bdfb6870d7fcefe04fdfffd1492c5033ec79b8de002c41895ea6e84393db391b9692983c84148928ba0fae6b2ee3aed2289a9e053d47340b5faa4870fa632c1b81c516a58a049728f941f57bc34ad53c236d33dc2ab6a196e896968d0a2bf651889"
"825b8f358ef4874b0e75e39331e513c506b29a61495e78722bb25475ec2ddcda0816ff634062a54721c9fb425ff286336e7036928cfac29216dd0eacd3e5328b6979f831dccf403e87ccfc4346f5743d972d5047f6055bd86c98b8fb720a3cc3f459750ddb870a845c1ff4bc3499b1c92b6e591eca7e94f1f8d2fa3c57fc97b573a738f7f55e3b6cc975a813ffb7f897930b8de8382c5883ebffba463ce72b0c50c721db403cef01d5be035730ac3c6f6a3f78681218656f397966753c04507e08a09f7176c3e37de40b9c7faaef1b675fd083c9cced4261dbd4a289f6aa0ba04964e1a6d328ef05786933d67d6da009aaac7d4a8ca31df5a15e3874eb9b288edf7d794e1abdf9e411c5bb87f7fb27f76bd62968bba4d53844e76487818ddd38620854debdced8930ead6b46f3bce6009683d3ffedfff0be83cd8727bbcbf428c761b79a3c06a7c2de7b99394030b51eeb954cfa3fa307a37881a8dcbcedf9549e2600b72f3665946d14071d9d22894020346466bfd2062e092f21e38e920609df77e3b8ec024334c9708a415d3408e22645f06cd6d805e8da2f4005000aed542aa995816bbbf32597d9025daea32fd07733e080188d6c5c7af4ce8b7bb25d7c""50e9f3cec80e86a8f9f6d4e78a40ee20fc3c83bbbd07020f0092cdac8ffc2d52c24166d78da8ec32ebc49f815264c5ab29ab84f3b44ba75c06b80aba2966a617830efb08fd3fdda831fedeb67b7d593c661538d422e1a9fe378acf51b0f2a07f34d84624e0b90af172e5976a237a7dea10f"
"a7cbfd3203d1b4985a1af6c2d2300136226b2edf519fdd2b7b5e3fb5b0c70f2e3160305fe9dd0c09b98d522666e5100532f516bfe24d12d46b5decb4d4cbdd5fe9cd647006c1c7eba14a56262fa7a3b7b6d7b22032c1d444fe023d66b7f51004c6176f4c198a2998beab66ca70e1343187ae697e9fbfa6ca6443d617552e6b7bb73c59613ce0a7cab58545bb40636f54ccdf89c507098680f4486f821b2fb2c7baa182686b0b6f893fc9575df701196b14255b547b925387cacd5f4a762b1d4b7f713e7aebe4f75ed648b8666e60a4f8d92f752451d704e19aa102bb3dda418c80f3b4f395965ec36fd9474088ac213b38220df73c8159401ff87751bbe392e0aab031de59691a0a77ba2ab7cfbf4daf09fa4d7d61dc5b456dfdbf7a60eab671ed1f1a67fd58bceb34e981a2dc3c3bb8a7a14fc8443b47a123662d96b4df2c584856ba257f39749d51caa70b147d50c68d4aafe51ee195f1ccb99b7015de726b5f0e85bf37617138d2b24d1cbe985d8d1cbb40a52e4c57e20c799e2f5ffc0557be9d3e2bc5b99dde628c4dffd5c8704c78689e967bc870c0fec80c3c69a2453b052a46e142309fb21bcbdad7c6c5a67df409bfb9899ec58ff0973e1813f47ec6428e35a932c117b5dc70a8f5b1a9fa402d59fa45714b4bd79bc214d488939f997add26d13c147aa4d4239d8aa0e3c70994eb4a8debb7cf292b3ff59bc36f97a9acad107fcc556c24a309c4a15dab16a47a71f31324dcc8183fdaabe1fbd1cb3808c1c35c311ea51188759d4e1533d39a9547f"
"04054e2ef994c97e213669f08db02702dd8b54154e7376f256dedc67fcd3dc48f5e0be91f1f88766415d203bb4bb11c4a0f6d0888e0c98d3b8519aab741b20ced0e02a5638e40ad2ffc301318a77e57787995acea46eb8ff7edb535036c3b3781d63a02bce56499cd03ae75ba6610ef27124da36dce85ad406c82e72a0319dcd6e05dbc66523be5015036de859af45be32c664c18ad712bf09d361769be3e568d5f51c943ec2c9f74077cb9f5757de92c643a2963d69c2cc3f010908e661f3a6ce202d50d72a436319bb2337ab1babd4f2cf1bffc3de25a09dfc5cffb31c7080c5473b4ff673fdae11e64cd492a784a106beb65bfc01f9b7b97384d877d9f4440b7434240e98656703edd66279f1bd5b7cfacc8a6b511f1db9060e813f2e37a8be5de25087b0520e7729a873e125d7cba84b93cdd333e8756630d9dc9e1815832c8dba1a3c51776948b184a916ae44694664192af75a616387f47319bcd5da1d94fce857c8e76c3438ae5c7c810310058558e01b01cfb5676f1a5a5d027bcd1ec62428a82b78fdc9dfe69ae9c0301f6f2dbf1475e1cd1804d05cb04583ae62efe63a6f1d20d5c5675f4822ddb8f6f6af3d639f56839b1993dc40223341c04d829849dea53aba7d0d2a2db0a89881a2ecee4f66698aef5ebdbb3c6d65ff03cc1a00b714112f0b111e7a97ded2abde97767e0ea6e19a04f96d708d419f457022ac21715ca86305b8d5e4f45d6382c7ce8d87a8f0f2f1a18134deb9a33b334bc04697479c4f438f5e58a62a1b22b49580fd46eb4"
"946d07c505e9c778dc56524880e8fb565487da236bb1340d92dbe21516f40a05dc3cec3fa4a56bc93ce57e7be50ef2fb38c94790acb9702dbf2ed30d6b5cc1e0173ed4c19e2822e79e711a523ecdeb6742d90353c904876e66b30fba8975d35418f0ef3fc8e5621d8d243973addf756d1e4621618fcae42af188a22f47f0f8bd0e821c16c8ca2a15e35d855ccc5c9660ebd2fe8966e6b86326905267b80358328483d0045fc63af4edda4020ecba5853f005b9058dbb81092cc12ebb3205ade902cef207f783a3921225f3a8a108eccf02cc303b11a2a7db60c897f31480db900fb1a6e1ccd1ba0aa61214037e50d8eb1ac777fc4a467ff9b9ffcaf34fe721300067d33a25f9acd43888ba09cbd26e8b269fe84065b5c44fdf734545fe21689b838eec4a00860f654df33f87d0f115a6fc1ba4f0de641f06eb8a19d2e75aad7dddc6f00c8d598015541fc8bd22540b9bd3babbbf3e41212d35cfef1236edfa5746b733de738c60901b87bfc3a4c7d49eb16e7fbb7ab93083cab5c225f79ef03db6d490169b5ecd2791fef9045e017f9dac41dbaf841f050729c6adf789b8008a82e61c80cc4d06207dbfd6b2a9cdfb67ac26280fa9ecc298dac1878fac6188066b9d8637f772136edaa7f64fa491b0bb4775656f5f1a3135686205b8217a590c088cf448892e134a29ef4cc61bd76886663afb18ad504b204ea52ef61782ce9ba44fbf2e18e1d59302a1b69717375be70a295517b069d26e161c91ec3a1a782e38efa6ac867dbe488cfddcf8c200135b059a0"
"da4b4dbadda9b742b906266a879da79da144eba455fa7cc5062d326996acdddec0eba8666b0e1e6c7116a1e5f04f1e94e5d85b77b2d35deb45402a589d46734810ba3a74414eb53181f75c2f0bad61d9f4aaeb94f30a1051f5ba2b2b30f1445bfe889da81e550449d863cd5af77d49d344b63666df8206bc04686ebdaee954da5f14692bc2bf1b4b01cd6b2bfad93dcc7e5c08a5059d047f6ffe96a17c828244b234a2abf28674b15d14b735956c0a9bd438183666d6926912358edea95ac5b1b6a53784f47819a3cfd4ddb9af8e74f30e06c30e218edda9eb8207dc7cd931d6e926af59f8238225dd037b47c7a4c8af558d981a7c9a7dbae3fb66345874b27cb229f1c82b841cac0cad018e8f75d0731d5a8ea0c4d530f575de7d39d77fffde64c9d1fd87b9af3759d8a275d5a1d95f1d2d0bee007544f5c39ecf4013c80cd89821f79af3979f23dfff87d093b85b892b93bec546c5eccabf41d04c65bb571543f2312ed5e3596ec5d6bf8e57e9854164d34b48ca0ca4044a526e038332348eb801a6ff342bf25750abbcfc27e7cb5e7b026db3743b210b91d1fb688c8f16d4e40203d39272f22b5bd0f796f0fa09c90*1*b48bda800b2b3665adca330cfc990283a604b08074521335437c0ed7f2a997069c88d620b638ee988edb3f6f32be1ccd01ffb14b66b2c213d31aad92b25f66f226f2793b5e554475ce8c1a7f9541ce66c594379303ce730fd77a6591c97f5bdc400ba7e8cbd496c188c2112208778ff9699674b117631d8f385ebe45ed91dd60a"
"4a657ca39c11c135e426c03ce2219392f55c635c1736f31b1a7a892273b6d9e2867864606aa0244b82c8be1748123f0b8478baa9402521583f24ac86c11801fe340e64628e8840aee6a093b1bf25aa05c74d1c1dd8ec48321b34a53bf78347a59fa9ee394a60b845cfd4c2f5bc53541065f1c5a0d3953d9808b26ee51d17dc026ea97a2ffae213bb9818f3c4009480ac0d1774e6237546204339db20ab366a805ba8c34304070959a16639006ced72bc3ba6430ef7e5a10e9a969ee233efc23b2d99bd8d49c3615f0da372cb98e077829f07e112a5bf4357a3cdee0268bbee69d31fea1ac66564d4b1c7c303f9b41e2b23b3c7825d1ef93ae1ca1aed1607177bf92cdce38fc68325a652efd3791e922a196eba24e9816c52afeb1d84577b8a22125c1d90beb57cacff4b2a637061d69bf7f1f006d102ca2acb8471909689d36196ec300691ddb9369868f3fd577e463d8b74c7a8e95fe2fd2954136f9650f7301d4a91d9c41f647675d37c1663d4b5c50cfb175facf30598a9be1ecc2f33fd4ec7e1ecc7dffbb1180a5b224b4eb6d0e0af4ecad6cbcb2a26cb3365a723caa2eacf9404083a427d5e7e62e967875e53a8eaf4f5873627717ce802b6b66d627f3390b50c0c950dac739ab46fad66920de3fb8edb0ad0a3c93e7b3beeb90a26a1553aecf4d1f3b17b7f852cf5441bd626012ca14d8e4aa2c43ef6a272f9f6990672b2ead99d839617069117aa10f840c379fc62de5ebf5c82ed59a5a1f76b0fec724ea809411709d88fd2f986c35edf9a562e3fd"
"bb13577e2ac78bb854768ab38850daf931c1b8cc3e6f3c244fb339d288348f88f792954e90b68d664b7f941b634aec4b2d54995ba08b999d32d007e85e7e0df4dc6022b0d6d7a23ac5bcbfb2dd6cdc300fd0e4c9b4403a53a67a1c8979774833ba4b8f338b1932424b8654e02ff039967bb43c3f0661bf22f638a4caef57d50acce63e472f1316fdb93e75218d630d958c1aef855a9a7bc54122a26ff94d78e74d48aff82a485f584b8acbea147666712d35a7167dc5f92ef4059e42c28ba66fbdccaafe71efc630b8ce7fd840bd2802c2d69a4b09a11cf17c9321d9ccfb1623bfaa89786df732b405e2cf118611e9ff153dd2db2df1953fdd888f023e74e23f3a5595b81456b6ffb33e91d65f08fc8eab545412b18be47d14ab77827073286a735187bed1b12fbed879969f7d06c53041a6bd79bf6c5260342480cdb50cb617c2b4111da501ea98f368320094c5353a36df520824ec52dd15e818bec43d80b537c0d809845645429ea4f7635528cb7b8149924053a76d3c05b0c31e5970eaa014708c64c902be5272513111a73e682ed9f473c87b964a4957934424bf957d1e86c6c90a967a8643eec2b65f08d4c91252cb9663a4e5aa4ad9180166ac633c0e5f5170656373489126e6be09e9e8bd6f226f0833bd392884dfce749d68ad51b1f0e0ef5fc5a8876e54558e191abcfc4632409547a8a5c46c2b546db07ba324b4d327ebe86f87dac27b64d6e0c8250019c1114a4f8fa39523dc3f5d597aa33af245ecca15ea8cbef7604eca5ed804ac4f57c12"
"6e335763925b88128b7289566270a5d7d1602481647f74d71bc1eafd0913851bcf07047dfef51b41fc02215d136885e647001f9f47546e9ea6ba0beab1d8a276cf9b85d780c05d4031f55d35d54c56f7fceeae9d62c58e7e928e591c2d6b1d14391f829f3e30bda6132bc513227cfad357be2c6f045bad7be72d01ceccd059327a72ce044edd534a5ddf71831bf07ebe84806feb621a5b8d71f4a608878e5e5daf3f8b4b3eda75f74f03d1ae5aebd029f037f66253f542aa06cd6c29ac5ed27ecdc7641fb6d54c98e71491772944303d3b6be683ac44b7bda5d49209133ff564cee31912b8e024cf628e0719522b11eff2e32874818f9a0ebde427657558a72943d6eb25c4b9d523336f37453af157035a3bc5ffd13847a928450d4e01f2ce7ca51d456939363c3e5a69b0d25311682c7b266cf86d12b63dcd322be77594c7f929a77467566a8d86a7d2b583b95f76626244738251fa762e0b2825c7668d6dde8ac5579c1a06318e5c5a6b2b1bc93bce6cd4853c50b6662482549290b15500722e3d6772c7541e3c864291dcbed84496dcc9ff4dddc974aa8b17b7ccea56c856f24ee2277a391c3c0c2c5584111ed24fe64e478e3c4d22380b8183222570fa3c70d29230aa21fd21808baacfd41e2430fed7c3316235e6b4c2c3331ee36d9e5c94ddbd73b351897cab7ede8a7c417c753d8023cf46694acbc9aa6ca556da7de108005330704cf54b1ec7bf7df02e36cd736237316b3523bca0a53a2472e68d30d95b1eb49282b27530bc69cd154b7a4dce75d"
"a3efc65c12ce45de7a63632d340fc61a1789129df1554813a15c9a6ad101c07363ba8d967b70ae1767f8927440678bab989dbe994922779c3c277055a35bf12d6909caba8a4b6bec7f49dd32426d858e53164c8db77bd1b9321b31e6c1ad1e92596bec4ad39d5b6944c7585a5ad0c6f83f64727a7f6397f784d865ba3b9c85343f3a2828a0e71d75f19036ea0f17e265750d6a01513be2bee0bd0a837996971b87305dafda12679bc118a1df188888396e10074254e4aeecb6801e00e8f3ade2889b65aba9e29d2d146001740116c893df1899175dbbf88ec175216df3d93a88fb6957adf64a3849e26194edb91188c0373fdf9be85a520c173817ccac3e4e9c88ce0bd9448be3f6cf3eb92b9337ecf2e63db5887e1113ee31529c373e83ec02012ddaa8812fa5c6b8be8febe29d0c286fe03832aee79018fdbaedd8bec03345c05faa1231ad148bf4531679738a537ec490bdcf78a0d9dd13e6988e360273c388b91006a66176c93caf3594cb098d5f4287a37d79b636eb566eaeb73ef76a4a480fad73caad3378d17a9395bf71c6c43f643b04b4f1773939329470e51053467b67ed8ac0807b8806d26d16f6f4fc15b3f3cc197d24ea26418cf970a5e7009bd871aff96be823fd80efe1adcaa882c168692b53bdb47effc666a1768d04d0d8bf199d36604e82b72fcce53e86d063c347aeecc79a846f8e12cdec679b857f85a75fe59a1338a411950459443b3fec6511dcc78d5bb6dc60accd6013400c0ef71f19d7713b37777a75e96d0d341d416c9cd94"
"7e3c442f6ddb31daec66bd96ca31b01d2dfb99d312a651ba5ec1765354de39d7aa4bb096ce7edbd93829d8ee2b7e3ff364f5d87f653a541f033db6c3266a03046f8612ad8d56a1c78912c9774c86a8d7e2eaa7f3bb1033470789ac2c32bd3c2ba1269bb01b176b167688f8fbe1f6094c3e2736bdc1cb1733364011681be98047cdad7d998241e121e6508cfd665c42b30f22bc442f940b5c7d93659f59abcb17aab1f28a02d0b59239f148211c525dd209cb932c54f24fa8a9541f0eab28b4c8df80845058e71e5447959bfc7f7d28e15542523410bc162f566875ed6d9d4fba519000b8c5d90f894f2bc74dc8307e26d4e0a9b418487d7470fbd64e97e660a3038a10a26a80e7cca09a3280ce3c87d07befd6f65127096d6075a18f30906828cee1f8b968dd3247210041078cf6d28f05977e5c172a9ecd83167873881e0ffcc56615ad0d64b0189ed8d559e43cccb1e2f8805df7156cb11f5df9dfbc067fce9fb3ee3230e28edfcf98741b9883f9f0f42913cc2be1036a0590107c69a9fadd4c9fc39df872f0db664ea7172fd72e0ad756be95417487d0c2bb38061c52124dcb2545f15a5bfd39d950b5878a067945733d8b1dc37cb85dd9393c98b0751c83d8e848fd1bd3ad243f6a8af7a8cb8cda7e1dc05324fa3932423fea0428131646534e74398f1604146da26a615045ee49ae2df3c8fcd16da64672845a946de4c26c1417c534a2b62a408a8c30c2e4f73ee44571259b628249c9e3f65e7b8d22002a170e7e53dc7c4cdc0073491db2cd6de20cd"
"df07501ff08378ac1cfe3ef479491f3fc475f8aa1fb188706c264e276da3e0399e2bc17cffd6ad0ff94d2d3b9a3b46e8c1472c41fc1c002daa76634f94b3bdf8560cb3241352c6f1be21fee70cd54a1d96e31d71ef99589b93e7ca8d026abcb4a4fbfc8c0f57d59a6d9e760f02fd0a569702da7f59da495c2dd7f92d60fb3220cd7932a032d40ed29deaa5fe971128c6503eb9d1029a23ed6dc4fd5e8c5cf0347841424d60a5a07a9781d08c85222cf7241d199609762488332a6eafbc08cec42c876da9bd3fa287bca12f71b6e33c4453afb970b425a45b9baa9aa69ebb3907e06e6610f100b00c86752b2c106c2e0b71963f1933d315ceef89132c7744149db0c28f62b3d7b43d570d1f5c40bf4b7470b3b8de30b0d756b8326542743f2fa5cf3eff226b6a658ecbe44dc9a0e59f073f999d8c3340ba30ecff6f2fa4f3815f0d4c665b5109ce8984971e5cbec806888c2acdf73d2a330de9e5133787aa4950d08759f4cfcb55ec8efb43d421cf3a9f601a096677eb95f61e352a9adae7c0b971fb455f170c7ed95329b699d6e93f024786507e2e0acbeffb452c26d8c041cb88316d09a08af54ec48451f9bb685a23910e97ac82bb41f19f6b42fa10cfb75f9fa8edd61653c14a27b51544e3fb28009aab76d060135df2d097fd4c2f2e63dba1192c648215fdd1dace4824d71e038e23184ede7f61baefd747aed93b9807d0b3b7b4f7cb9eb171d1ba241b19cf1c74781eaaaca99a458253777522dedcf3d1db6bd4eec4459e59ad635904201b5d91c77bb"
"b6e91f00f5a6f29794b35afde3dcd850f08ac5da097549ded05159567e9f7a023e08e49253766c0e151852714987201e90df675368ee638a947b7e6dc20bedf60656971170afe2d453662685dc1ceef8436ca8071680d0346239b41a6825839e9d5af12f9574d51b4672c5fa7f84bac497c8ba5fad2c10fbffe5ee713090b903d7723cd28c1b189a47c6a9fe9a88d0881dd60d1970c6e8a6d812bbd089c10841e5ced1417bef41f400118fa990d157bca93267d407989de017bd48f0231d43b9487526072e2755461274b3f5bf27847dda36c652a2b1fdd3815fd4ab93863426b31ecd1e6a9094dd2ed0190f8138e650dd2174fcc6b6ab1b8b91cc8020f2dcbb14855e7dd0bc1b5a01f55f81c0476daf1684cc4e72a68327120730ae92c45ab4e447c4ee900d61f79681667eec61343e4eebdd65c5b38a1ba5e3478f4d2f59d184ec39aca445a0f6edaa6840f04bfc19acf23db4507609cbdb44514b36aa5ef4ffe46577b711d1028970916eae919f1b4913d5894a24117cd7cc1aa8965840865554ce663af470455c0f756c795fb29eec04b727b12f7f3796f572ca2ec1e8771a88f68999e16b2acb235a7d9146f85f2be5a034babc3bdde750eb7895396d4777c144aee517a07310dcc8c9ce0ead93abb7f1eb4e34ed5036361d682c97eac1ad7c8158035e40a713f0f2e6f6e677d4b11ecc97e101a5b48420435dd218846ae622b416faeba7e0003bbbece71c2aa046715173b408c8ab2888b0b5dc4c34683f83ba9a83795f86122e6d80597d3a952a44f"
"5a1edb6f294a0ceebefc3cb54db814cf91fe450ed4c71d0b4091a1fc7474", "goodjob"},
{NULL}
};
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* Event labels printed by the auto-tuner; indices correspond to
   multi_profilingEvent[0..2] in crypt_all(): host->GPU transfer,
   kernel run, GPU->host transfer. */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Upper bound on the local work size for this kernel on the current
   device, as reported by the shared auto-tune helper. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/*
 * Allocate host buffers and OpenCL device buffers sized for 'gws'
 * candidate passwords and bind the device buffers to the kernel's
 * three arguments. Called by the auto-tuner, possibly several times
 * with different gws values (release_clobj() runs in between).
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(dmg_password) * gws;
	outsize = sizeof(dmg_hash) * gws;
	settingsize = sizeof(dmg_salt);
	cracked_size = sizeof(*cracked) * gws;

	/* Host-side staging buffers; 'cracked' doubles as the
	   "buffers are allocated" flag tested by release_clobj(). */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* Kernel argument order: (in, out, setting) */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Free the device buffers and their host mirrors. 'cracked' is only
   non-NULL after create_clobj() ran, so it serves as the guard. */
static void release_clobj(void)
{
	if (!cracked)
		return;

	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Tear down everything reset() built; safe to call when the format was
   never auto-tuned. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* Early init: record the format descriptor and prepare the device.
   Kernel build and auto-tuning are deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * One-time OpenCL setup: build the PBKDF2-HMAC-SHA1 kernel with the
 * key/salt/output sizes baked in as -D defines, then hand control to
 * the shared auto-tuner. Guarded by 'autotuned' so repeated reset()
 * calls do not rebuild the program.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(dmg_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/*
 * Validate a "$dmg$..." input line field by field without building a
 * salt. Supports header versions 1 and 2; every hex field must have
 * exactly the length announced by the preceding length field, and all
 * length fields are bounded by the struct custom_salt buffer sizes.
 *
 * Fixes: the v1 hmac_sha1_key and v2 zchunk fields are now checked
 * with hexlenl() like every other hex field (the old strlen() checks
 * accepted non-hex data of the right length, which get_salt() would
 * then mis-decode); the v2 data_size bound is enforced before the
 * chunk hex is scanned; a redundant second atoi() was dropped.
 *
 * Returns 1 when the line is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res;

	if (strncmp(ciphertext, "$dmg$", 5) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 5;	/* skip over "$dmg$" marker */
	if ((p = strtokm(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* ivlen */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 32)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted_keyblob_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 128)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted keyblob */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk number */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* data_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 8192)	/* bound the size before scanning the hex blob */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* scp */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		/* FIXME: which values are allowed here? */
		if (res == 1) {
			if ((p = strtokm(NULL, "*")) == NULL)	/* zchunk */
				goto err;
			if (hexlenl(p) != 4096 * 2)
				goto err;
		}
	}
	else if (headerver == 1) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_wrapped_aes_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 296)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* wrapped_aes_key */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_hmac_sha1_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 300)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* hmac_sha1_key */
			goto err;
		if (hexlenl(p) != res * 2)
			goto err;
	}
	else
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a "$dmg$..." line (already screened by valid()) into the
 * format's salt struct. Field order mirrors valid() exactly, so the
 * strtokm() results are trusted without re-checking bounds here.
 * Returns a pointer to static storage; the core copies SALT_SIZE
 * bytes out before the next call.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 5;	/* skip the "$dmg$" tag */
	p = strtokm(ctcopy, "*");
	cs.headerver = atoi(p);
	if (cs.headerver == 2) {
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.ivlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.ivlen; i++)
			cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.encrypted_keyblob_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.encrypted_keyblob_size; i++)
			cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		/* chunk number; hash_plugin_check_hash() feeds it to HMAC-SHA1
		   to derive the per-chunk IV */
		cs.cno = atoi(p);
		p = strtokm(NULL, "*");
		cs.data_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.data_size; i++)
			cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		/* flag: an alternate 4096-byte start chunk follows */
		cs.scp = atoi(p);
		if (cs.scp == 1) {
			p = strtokm(NULL, "*");
			for (i = 0; i < 4096; i++)
				cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
					+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* optional trailing field: PBKDF2 iteration count */
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	else {
		/* headerver 1 */
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_wrapped_aes_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_wrapped_aes_key; i++)
			cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_hmac_sha1_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_hmac_sha1_key; i++)
			cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		/* optional trailing field: PBKDF2 iteration count */
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	/* guard against an explicit "0" in the input */
	if (cs.iterations == 0)
		cs.iterations = 1000;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Make 'salt' current and upload the kernel settings to the GPU.
 * The kernel always consumes a fixed 20-byte salt and produces 32
 * bytes of derived key (hash_plugin_check_hash() uses 24 of them for
 * the 3DES schedules). NOTE(review): 20 bytes are copied regardless
 * of cur_salt->saltlen; valid() caps saltlen at 20 and get_salt()
 * zeroes the struct, so the tail is zero padding when saltlen < 20.
 *
 * Fix: the address-of operator in the write call had been mangled to
 * the mojibake "¤tsalt" (HTML entity garbling of "&currentsalt"),
 * which does not compile; restored to &currentsalt.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;

	memcpy((char*)currentsalt.salt, cur_salt->salt, 20);
	currentsalt.length = 20;
	currentsalt.outlen = 32;
	currentsalt.iterations = cur_salt->iterations;

	/* Non-blocking write; the queue orders it before the kernel run */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}
#undef set_key
/* Stage one candidate password in the host input buffer, truncated to
   what the kernel was built for; the stored length has no NUL. */
static void set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;

	memcpy(inbuffer[index].v, key, len);
	inbuffer[index].length = len;
}
/* Return the candidate at 'index' as a NUL-terminated C string.
   Uses static storage, overwritten by the next call. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	int len = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, len);
	out[len] = '\0';

	return out;
}
/*
 * Try to unwrap an Apple-style 3DES-EDE wrapped key blob (headerver 1
 * images). 'decryptKey' supplies 24 bytes of key material (three DES
 * keys at offsets 0/8/16).
 *
 * The unwrap is: CBC-decrypt the blob with a fixed magic IV, verify
 * the PKCS padding, reverse the bytes, then CBC-decrypt the tail again
 * using the first 8 reversed bytes as IV and verify padding once more.
 * The unwrapped key itself is discarded; only the two padding checks
 * serve as the pass/fail signal.
 *
 * Returns 1 if both padding checks pass, 0 otherwise.
 */
static int apple_des3_ede_unwrap_key1(const unsigned char *wrapped_key, const int wrapped_key_len, const unsigned char *decryptKey)
{
	DES_key_schedule ks1, ks2, ks3;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	/* Apple's fixed key-wrap IV */
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, i;

	DES_set_key((DES_cblock*)(decryptKey + 0), &ks1);
	DES_set_key((DES_cblock*)(decryptKey + 8), &ks2);
	DES_set_key((DES_cblock*)(decryptKey + 16), &ks3);
	DES_ede3_cbc_encrypt(wrapped_key, TEMP1, wrapped_key_len, &ks1, &ks2, &ks3,
	        (DES_cblock*)IV, DES_DECRYPT);

	outlen = check_pkcs_pad(TEMP1, wrapped_key_len, 8);
	if (outlen < 0)
		return 0;

	/* Byte-reverse the first pass's plaintext, per Apple's scheme */
	for (i = 0; i < outlen; i++)
		TEMP2[i] = TEMP1[outlen - i - 1];

	/* Second pass: leading 8 bytes of the reversed buffer are the IV */
	outlen -= 8;
	DES_ede3_cbc_encrypt(TEMP2 + 8, TEMP1, outlen, &ks1, &ks2, &ks3,
	        (DES_cblock*)TEMP2, DES_DECRYPT);

	outlen = check_pkcs_pad(TEMP1, outlen, 8);
	if (outlen < 0)
		return 0;

	return 1;
}
/*
 * Decide whether 'derived_key' (PBKDF2 output from the GPU) is a
 * plausible password for the current salt.
 *
 * headerver 1: a hit means both the wrapped AES key and the wrapped
 * HMAC-SHA1 key unwrap with valid PKCS padding.
 *
 * headerver 2: 3DES-decrypt the keyblob with the derived key, take the
 * AES and HMAC-SHA1 keys from the start of the plaintext, derive the
 * per-chunk IV as HMAC-SHA1(key, chunk number), AES-CBC-decrypt the
 * data chunk and accept if it contains 8 consecutive NUL bytes. When
 * present (scp == 1), the alternate start block is checked the same
 * way. DMG_DEBUG builds add further plaintext-signature tests and can
 * dump the decrypted block.
 *
 * Returns 1 for a hit, 0 otherwise.
 */
static int hash_plugin_check_hash(unsigned char *derived_key)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int ret = 0;

	if (cur_salt->headerver == 1) {
		if (apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key, cur_salt->len_wrapped_aes_key, derived_key) &&
		apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key, cur_salt->len_hmac_sha1_key, derived_key)) {
			return 1;
		}
	}
	else {
		DES_key_schedule ks1, ks2, ks3;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		/* 8 bytes used as 3DES IV, 16 as AES IV; hmac_sha1() writes 20 */
		unsigned char iv[20];
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };

		/* Recover key material: 3DES-EDE-CBC decrypt the keyblob using
		   24 bytes of the derived key (offsets 0/8/16) */
		DES_set_key((DES_cblock*)(derived_key + 0), &ks1);
		DES_set_key((DES_cblock*)(derived_key + 8), &ks2);
		DES_set_key((DES_cblock*)(derived_key + 16), &ks3);
		memcpy(iv, cur_salt->iv, 8);
		DES_ede3_cbc_encrypt(cur_salt->encrypted_keyblob, TEMP1,
		        cur_salt->encrypted_keyblob_size, &ks1, &ks2, &ks3,
		        (DES_cblock*)iv, DES_DECRYPT);
		/* Both keys are taken from the start of the decrypted blob */
		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);

		/* Per-chunk IV = HMAC-SHA1(hmac key, 32-bit chunk number) */
		hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cur_salt->cno, 4, iv, 20);
		/* 48-byte keyblob implies AES-128, otherwise AES-256 */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size, &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			ret = 1;
		}
/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			ret = 1;
		}
		/* Journalled HFS+ */
		if (memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			ret = 1;
		}
		/* Handle compressed DMG files, CMIYC 2012 and self-made
		   samples. Is this test obsoleted by the </plist> one? */
		if ((r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int *)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				ret = 1;
			}
		}
		/* Handle VileFault sample images */
		if (memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			ret = 1;
		}
		/* Apple is a good indication but it's short enough to
		   produce false positives */
		if (memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			ret = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of the DMG */
		if (cur_salt->scp == 1) {
			int cno = 0;	/* the start block is always chunk 0 */

			hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cno, 4, iv, 20);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096, &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				ret = 1;
			}
#ifdef DMG_DEBUG
			/* This test seem to be obsoleted by the 8xNULL test */
			if (memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				ret = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (ret && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
#if FCNTL_LOCKS
				struct flock lock = { 0 };

				lock.l_type = F_WRLCK;
				while (fcntl(fd, F_SETLKW, &lock)) {
					if (errno != EINTR)
						pexit("fcntl(F_WRLCK)");
				}
#elif OS_FLOCK
				while (flock(fd, LOCK_EX)) {
					if (errno != EINTR)
						pexit("flock(LOCK_EX)");
				}
#endif
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
	}
	return ret;
}
/*
 * Derive PBKDF2-SHA1 keys for 'count' candidates on the GPU, then
 * verify each derived key on the CPU (OpenMP-parallel when enabled).
 * Results land in cracked[]/any_cracked for the cmp_* callbacks.
 * Returns the unchanged candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Clear results from the previous batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking, so this also syncs the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	/* The auto-tuner only times the GPU portion */
	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	if (hash_plugin_check_hash((unsigned char*)outbuffer[index].v) == 1)
	{
		cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
		any_cracked |= 1;
	}
	return count;
}
/* Full verification already ran in crypt_all(); just report whether
   anything in this batch matched. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate result recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Nothing further to check: cmp_one() is already authoritative for
   this format. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Tunable-cost callback: report the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/*
 * Format descriptor registered with the JtR core. BINARY_SIZE is 0:
 * candidates are verified inside crypt_all()/cmp_*() against the
 * salt's encrypted data rather than by comparing stored binaries.
 */
struct fmt_main fmt_opencl_dmg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef DMG_DEBUG
		/* the extra debug signature tests can false-positive */
		FMT_NOT_EXACT |
#endif
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{
			/* tunable cost reported per salt (see iteration_count) */
			"iteration count",
		},
		dmg_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
GB_unop__abs_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_uint32_uint32
// op(A') function: GB_unop_tran__abs_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the (identity-on-uint32) unary op to every
// entry of A.  Handles both the bitmap case (Ab != NULL) and the
// dense/full case.  Cx and Ax may be aliased.
GrB_Info GB_unop_apply__abs_uint32_uint32
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged present in Ab are copied.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t cij = Ax [p] ;     // identity op: abs is a no-op on uint32
            Cx [p] = cij ;
        }
    }
    else
    {
        // dense case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t cij = Ax [p] ;
            Cx [p] = cij ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The actual loop lives in the textually-included GB_unop_transpose.c
// template, which consumes the GB_* macros defined above (GB_ATYPE,
// GB_CTYPE, GB_CAST_OP, ...) together with the parameters below.
GrB_Info GB_unop_tran__abs_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-workspace transpose buffers
    const int64_t *GB_RESTRICT A_slice, // partition of A across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_dense_ewise3_noaccum_template.c | //------------------------------------------------------------------------------
// GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_unused.h"
{
    // NOTE(review): this template is textually #include'd into a wrapper
    // that defines nthreads, GB_ATYPE/GB_BTYPE/GB_CTYPE, GB_GETA/GB_GETB,
    // GB_BINOP, and the GB_CTYPE_IS_* flags — it is not a standalone
    // translation unit.

    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------

    // any matrix may be aliased to any other (C==A, C==B, and/or A==B)
    GB_ATYPE *Ax = (GB_ATYPE *) A->x ;
    GB_BTYPE *Bx = (GB_BTYPE *) B->x ;
    GB_CTYPE *Cx = (GB_CTYPE *) C->x ;
    const int64_t cnz = GB_nnz (C) ;
    ASSERT (GB_as_if_full (A)) ;
    ASSERT (GB_as_if_full (B)) ;
    ASSERT (GB_IS_FULL (C)) ;
    ASSERT (!C->iso) ;
    ASSERT (!A->iso) ;
    ASSERT (!B->iso) ;
    int64_t p ;

    //--------------------------------------------------------------------------
    // C = A+B where all 3 matrices are dense
    //--------------------------------------------------------------------------

    // The aliased special cases below are compiled only when the aliasing is
    // type-correct; otherwise they fall through to the general loop.
    #if GB_CTYPE_IS_BTYPE
    if (C == B)
    {

        //----------------------------------------------------------------------
        // C = A+C where A and C are dense
        //----------------------------------------------------------------------

        // C and B cannot be aliased if their types differ
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p, false) ;                 // aij = Ax [p]
            GB_BINOP (GB_CX (p), aij, GB_CX (p), 0, 0) ;  // Cx [p] = aij+Cx [p]
        }
    }
    else
    #endif
    #if GB_CTYPE_IS_ATYPE
    if (C == A)
    {

        //----------------------------------------------------------------------
        // C = C+B where B and C are dense
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETB (bij, Bx, p, false) ;                 // bij = Bx [p]
            GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ;  // Cx [p] += bij
        }
    }
    else
    #endif
    {

        //----------------------------------------------------------------------
        // C = A+B where all 3 matrices are dense
        //----------------------------------------------------------------------

        // note that A and B may still be aliased to each other
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p, false) ;            // aij = Ax [p]
            GB_GETB (bij, Bx, p, false) ;            // bij = Bx [p]
            GB_BINOP (GB_CX (p), aij, bij, 0, 0) ;   // Cx [p] = aij + bij
        }
    }
}
|
boys_taylor.h | #pragma once
#include "simint/boys/boys_shortgrid.h"
#include "simint/vectorization/vectorization.h"
#ifdef __cplusplus
#include "simint/cpp_restrict.hpp"
extern "C" {
#endif
extern double boys_shortgrid[BOYS_SHORTGRID_NPOINT][BOYS_SHORTGRID_MAXN+1];
static inline
void boys_F_taylor(double * restrict F, double x, int n)
{
    /* Boys function F_0..F_n via a 7th-order Taylor expansion around the
       nearest pre-tabulated grid point of boys_shortgrid.  Results are
       written with a stride of SIMINT_SIMD_LEN (one SIMD lane). */

    /* inverse factorials 1/j! for the Taylor terms; [0] is unused */
    static const double inv_fact[8] = {
        1.0, 1.0, 1.0/2.0, 1.0/6.0,
        1.0/24.0, 1.0/120.0, 1.0/720.0, 1.0/5040.0
    };

    const int lookup_idx =
        (int)(BOYS_SHORTGRID_LOOKUPFAC*(x+BOYS_SHORTGRID_LOOKUPFAC2));
    const double xi = ((double)lookup_idx * BOYS_SHORTGRID_SPACE);
    const double dx = xi-x; // -delta x

    double const * restrict gridpts = &(boys_shortgrid[lookup_idx][0]);

#ifndef SIMINT_SCALAR
    #pragma omp simd
#endif
    for(int i = 0; i <= n; ++i)
    {
        double const * restrict g = gridpts + i;

        /* Horner evaluation, innermost term first — identical floating-point
           operation order to the fully unrolled nested form. */
        double acc = inv_fact[7] * g[7];
        for(int j = 6; j >= 1; --j)
            acc = inv_fact[j] * g[j] + dx * acc;

        F[i*SIMINT_SIMD_LEN] = g[0] + dx * acc;
    }
}
static inline
double boys_F_taylor_single(double x, int n)
{
    /* Single Boys value F_n(x) by the same 7th-order Taylor expansion
       around the nearest tabulated grid point. */

    static const double inv_fact[8] = {
        1.0, 1.0, 1.0/2.0, 1.0/6.0,
        1.0/24.0, 1.0/120.0, 1.0/720.0, 1.0/5040.0
    };

    const int lookup_idx =
        (int)(BOYS_SHORTGRID_LOOKUPFAC*(x+BOYS_SHORTGRID_LOOKUPFAC2));
    const double xi = ((double)lookup_idx * BOYS_SHORTGRID_SPACE);
    const double dx = xi-x; // -delta x

    double const * restrict g = &(boys_shortgrid[lookup_idx][n]);

    /* Horner form with the same operation order as the unrolled original */
    double acc = inv_fact[7] * g[7];
    for(int j = 6; j >= 1; --j)
        acc = inv_fact[j] * g[j] + dx * acc;

    return g[0] + dx * acc;
}
static inline
void boys_F_taylor_vec(SIMINT_DBLTYPE * restrict F, SIMINT_DBLTYPE x, int n)
{
    /* Vectorized Boys function: for each SIMD lane v, evaluates F_0..F_n
       at x[v] via the 7th-order Taylor expansion around the nearest
       tabulated grid point.  Output layout is F[i][lane], i.e. index
       i*SIMINT_SIMD_LEN + v. */

    // NOTE(review): relies on SIMINT_DBLTYPE being exactly SIMINT_SIMD_LEN
    // packed doubles — the pointer casts reinterpret the vector registers
    // as scalar lanes; confirm against the vectorization header.
    double * restrict Fd = (double *)F;
    double * restrict xd = (double *)&x;

    #ifndef SIMINT_SCALAR
    // workaround for GCC missing simdlen??
    #if defined __clang__ || defined __INTEL_COMPILER
    #pragma omp simd simdlen(SIMINT_SIMD_LEN)
    #else
    #pragma omp simd
    #endif
    #endif
    for(int v = 0; v < SIMINT_SIMD_LEN; v++)
    {
        // per-lane table lookup (lanes may hit different grid rows)
        const int lookup_idx = (int)(BOYS_SHORTGRID_LOOKUPFAC*(xd[v]+BOYS_SHORTGRID_LOOKUPFAC2));
        const double xi = ((double)lookup_idx * BOYS_SHORTGRID_SPACE);
        const double dx = xi-xd[v]; // -delta x

        double const * restrict gridpts = &(boys_shortgrid[lookup_idx][0]);

        for(int i = 0; i <= n; ++i)
        {
            double const * restrict gridpts2 = gridpts + i;

            // 7th-order Taylor series; coefficients are 1/j!
            Fd[i*SIMINT_SIMD_LEN + v] = gridpts2[0]
                       + dx * (                  gridpts2[1]
                       + dx * ( (1.0/2.0   )   * gridpts2[2]
                       + dx * ( (1.0/6.0   )   * gridpts2[3]
                       + dx * ( (1.0/24.0  )   * gridpts2[4]
                       + dx * ( (1.0/120.0 )   * gridpts2[5]
                       + dx * ( (1.0/720.0 )   * gridpts2[6]
                       + dx * ( (1.0/5040.0)   * gridpts2[7]
                       )))))));
        }
    }
}
static inline
SIMINT_DBLTYPE boys_F_taylor_single_vec(SIMINT_DBLTYPE x, int n)
{
    /* Vectorized single-order Boys function: returns a SIMD vector whose
       lane v holds F_n(x[v]), computed by the 7th-order Taylor expansion
       around the nearest tabulated grid point. */

    SIMINT_DBLTYPE ret;

    // NOTE(review): assumes SIMINT_DBLTYPE is SIMINT_SIMD_LEN packed
    // doubles — pointer casts view the vectors lane-by-lane; confirm ABI.
    double * retd = (double *)&ret;
    double * restrict xd = (double *)&x;

    #ifndef SIMINT_SCALAR
    // workaround for GCC missing simdlen??
    #if defined __clang__ || defined __INTEL_COMPILER
    #pragma omp simd simdlen(SIMINT_SIMD_LEN)
    #else
    #pragma omp simd
    #endif
    #endif
    for(int v = 0; v < SIMINT_SIMD_LEN; v++)
    {
        // per-lane grid lookup; row starts at order n
        const int lookup_idx = (int)(BOYS_SHORTGRID_LOOKUPFAC*(xd[v]+BOYS_SHORTGRID_LOOKUPFAC2));
        const double xi = ((double)lookup_idx * BOYS_SHORTGRID_SPACE);
        const double dx = xi-xd[v]; // -delta x

        double const * restrict gridpts = &(boys_shortgrid[lookup_idx][n]);

        // 7th-order Taylor series; coefficients are 1/j!
        retd[v] = gridpts[0]
                   + dx * (                  gridpts[1]
                   + dx * ( (1.0/2.0   )   * gridpts[2]
                   + dx * ( (1.0/6.0   )   * gridpts[3]
                   + dx * ( (1.0/24.0  )   * gridpts[4]
                   + dx * ( (1.0/120.0 )   * gridpts[5]
                   + dx * ( (1.0/720.0 )   * gridpts[6]
                   + dx * ( (1.0/5040.0)   * gridpts[7]
                   )))))));
    }

    return ret;
}
#ifdef __cplusplus
}
#endif
|
DOT_R2_Solver.h | /**
* @fileoverview Copyright (c) 2019, Stefano Gualandi,
* via Ferrata, 1, I-27100, Pavia, Italy
*
* @author stefano.gualandi@gmail.com (Stefano Gualandi)
*
*/
#pragma once
#include <omp.h>
#include <cassert>
#include <chrono>
#include <cinttypes>
#include <fstream>
#include <limits>
#include <random>
#include <sstream>
#include <vector>
using std::vector;
#include <array>
using std::array;
#include "DOT_DotSimplex.h"
#include "DOT_Vars.h"
// Distance function between a pair of point in R^k
// Squared Euclidean distance between two points of R^2 given as
// 2-element arrays (no sqrt: callers compare/accumulate squared costs).
inline constexpr auto DISTANCE_R2(const double* x, const double* y) {
  const double dx = x[0] - y[0];
  const double dy = x[1] - y[1];
  return dx * dx + dy * dy;
}
namespace DOT {
namespace R2 {
// Container for general discrete measure
// Discrete measure on R^2: integer-ish weight Ws[i] located at the point
// (Ps[2*i], Ps[2*i+1]).  Positions are stored interleaved in a flat array.
template <typename FlowType = int, typename PosType = double>
class GMeasureR2 {
 public:
  GMeasureR2() {}

  // Read from file (e.g., DOTMark images)
  GMeasureR2(const std::string& filename) { readFromFile(filename); }

  // setter
  void reserve(size_t n) {
    Ws.reserve(n);
    Ps.reserve(2 * n);
  }

  // Append one weighted point (_p1, _p2) with weight _w.
  void add(FlowType _w, PosType _p1, PosType _p2) {
    Ws.emplace_back(_w);
    Ps.emplace_back(_p1);
    Ps.emplace_back(_p2);
  }

  // Use as few memory as possible
  void shrink_to_fit() {
    Ws.shrink_to_fit();
    Ps.shrink_to_fit();
  }

  // getters
  size_t size() const { return Ws.size(); }

  FlowType getW(size_t i) const { return Ws[i]; }
  // Pointer to the (x, y) pair of point i inside the interleaved array.
  inline const PosType* getP(size_t i) const { return &Ps[2 * i]; }

  // Parse from file: one CSV row per grid row, cell value = weight,
  // point position = (row index, column index).
  // NOTE(review): the number of rows read equals the length of the first
  // row, i.e. the input is assumed to be a square grid (DOTMark images) —
  // TODO confirm for non-square inputs.  Also note reserve(n) runs only
  // after the first row has been parsed; harmless, just late.
  void readFromFile(const std::string& filename) {
    std::ifstream in_file(filename);

    if (!in_file) {
      fprintf(stdout, "FATAL ERROR: Cannot open file %s", filename.c_str());
      exit(EXIT_FAILURE);
    }

    // Read first line
    auto read_row = [&](size_t i) {
      int j = 0;
      std::string line;
      std::getline(in_file, line);
      std::stringstream lineStream(line);
      std::string cell;

      while (std::getline(lineStream, cell, ',')) {
        add(stoi(cell), i, j);
        ++j;
      }

      return j;  // row length (number of parsed cells)
    };

    // Read first row, and return row length
    int n = read_row(0);
    reserve(n);
    for (size_t i = 1; i < n; ++i) read_row(i);

    in_file.close();

    shrink_to_fit();
  }

 private:
  vector<FlowType> Ws;  // weights, one per point
  vector<PosType> Ps;   // interleaved coordinates: x0, y0, x1, y1, ...
};
typedef GMeasureR2<int, double> MeasureR2;
// Build a measure of n unit-weight points placed uniformly at random in
// the unit square [0,1)^2.  The generator is seeded deterministically
// from `seed`, so the same seed reproduces the same measure.
MeasureR2 createRandom0N(size_t n, int seed = 13) {
  MeasureR2 mu;
  mu.reserve(n);

  // FIX: removed the unused std::random_device (and its misleading
  // "seeded with rd()" comment) — the engine is, and always was,
  // seeded from the `seed` parameter.
  std::mt19937 gen(seed);
  std::uniform_real_distribution<> Uniform01(0, 1);

  for (size_t i = 0; i < n; i++) mu.add(1, Uniform01(gen), Uniform01(gen));

  return mu;
}
// Solve separation problem as single core problem
// Pricing/separation oracle (single-threaded): for each source i whose
// dual U[i] exceeds the tolerance, scan all sinks j for the arc (i, j)
// with the most negative reduced cost c_ij - (U[i] - V[j]) and record it
// in vars[i].  Returns the number of distance evaluations performed.
int solveSeparation(const MeasureR2& Mu, const vector<double>& U,
                    const MeasureR2& Nu, const vector<double>& V, Vars& vars,
                    double vmin) {  // Avoid useless memory allocations
  int m = Mu.size();
  int n = Nu.size();
  assert(m == vars.size());

  int cmp = 0;  // distance evaluations actually performed (profiling)
  for (int i = 0; i < m; ++i) {
    if (U[i] > FEASIBILITY_TOL + vmin) {
      double best_v = -FEASIBILITY_TOL;  // most negative reduced cost so far
      double best_c = -1;                // cost of best arc; -1 == none found
      int best_j = 0;
      for (int j = 0; j < n; ++j) {
        double violation = U[i] - V[j];
        // Cheap prefilter: since c_ij >= 0, the reduced cost can only
        // beat best_v when the dual difference alone exceeds -best_v,
        // so the distance is computed only for promising pairs.
        if (violation > -best_v) {
          double c_ij = DISTANCE_R2(Mu.getP(i), Nu.getP(j));
          cmp++;
          violation = c_ij - violation;  // reduced cost of arc (i, j)
          if (violation < best_v) {
            best_v = violation;
            best_c = c_ij;
            best_j = j;
            // if (U[i] <= -best_v + vmin) break;
          }
        }
      }
      // Store most violated cuts for element i; if nothing improved,
      // best_c stays -1 and the caller's `v.c > -1` filter skips it.
      vars[i].b = m + best_j;
      vars[i].c = best_c;
    }
  }
  // fprintf(stdout, "cmp: %d\n", cmp);
  return cmp;
}  // solveSeparation
// Solve separation problem as multi core problem
// OpenMP variant of solveSeparation: rows i are priced independently, so
// the outer loop is parallelized (dynamic schedule, chunk 1, because rows
// failing the dual test do almost no work).  Same bookkeeping as the
// serial version, but the comparison counter is omitted.
void solveSeparationCore(const MeasureR2& Mu, const vector<double>& U,
                         const MeasureR2& Nu, const vector<double>& V,
                         Vars& vars,
                         double vmin) {  // Avoid useless memory allocations
  int m = Mu.size();
  int n = Nu.size();
  assert(m == vars.size());

#pragma omp parallel
  {
#pragma omp for schedule(dynamic, 1)
    for (int i = 0; i < m; ++i) {
      // vars[i].c = -1;
      if (U[i] > FEASIBILITY_TOL + vmin) {
        double best_v = -FEASIBILITY_TOL;  // most negative reduced cost so far
        double best_c = -1;                // cost of best arc; -1 == none found
        int best_j = 0;
        for (int j = 0; j < n; ++j) {
          double violation = U[i] - V[j];
          // prefilter: c_ij >= 0, so only dual differences above -best_v
          // can yield an improving reduced cost
          if (violation > -best_v) {
            double c_ij = DISTANCE_R2(Mu.getP(i), Nu.getP(j));
            violation = c_ij - violation;  // reduced cost of arc (i, j)
            if (violation < best_v) {
              best_v = violation;
              best_c = c_ij;
              best_j = j;
              // if (U[i] <= -best_v + vmin) break;
            }
          }
        }
        // Store most violated cuts for element i (each thread writes its
        // own rows only, so no synchronization is needed)
        vars[i].b = m + best_j;
        vars[i].c = best_c;
      }
    }
  }
}  // solveSeparationCore
// void solveSeparationGPU(concurrency::array_view<double, 2> xv,
// vector<double>& U,
// concurrency::array_view<double, 2> yv,
// vector<double>& V, Vars& vars, int n, double vmin) {
// concurrency::array_view<double> Uv((int)U.size(), &U[0]);
// concurrency::array_view<double> Vv((int)V.size(), &V[0]);
//
// concurrency::array_view<Var> cv(vars.size(), vars);
// int m = U.size();
//
// concurrency::parallel_for_each(
// cv.extent, [=](concurrency::index<1> idx) restrict(amp) {
// if (Uv[idx[0]] > FEASIBILITY_TOL + vmin) {
// double best_v = -FEASIBILITY_TOL;
// double best_c = -1;
// int best_j = 0;
//
// for (int j = 0; j < n; ++j) {
// double violation = Uv[idx] - Vv[j];
// if (violation > -best_v) {
// double c_ij =
// (xv(0, idx[0]) - yv(0, j)) * (xv(0, idx[0]) - yv(0, j)) +
// (xv(1, idx[0]) - yv(1, j)) * (xv(1, idx[0]) - yv(1, j));
// violation = c_ij - violation;
// if (violation < best_v) {
// best_v = violation;
// best_c = c_ij;
// best_j = j;
// }
// }
// }
//
// // Store most violated cuts for element i
// cv[idx].b = m + best_j;
// cv[idx].c = best_c;
// }
// });
//
// try {
// cv.synchronize();
// } catch (const Concurrency::accelerator_view_removed& e) {
// fprintf(stdout, "solveSeparationGPU: %s\n", e.what());
// }
//}
//
// void solveSeparationGPUTile(concurrency::array_view<double, 2> xv,
// vector<double>& U,
// concurrency::array_view<double, 2> yv,
// vector<double>& V, Vars& vars, int n, double vmin)
// {
// concurrency::array_view<double> Uv((int)U.size(), &U[0]);
// concurrency::array_view<double> Vv((int)V.size(), &V[0]);
//
// concurrency::array_view<Var> cv(vars.size(), vars);
//
// int m = U.size();
//
// static const int TS = 128;
// static const int TK = 2;
//
// concurrency::parallel_for_each(
// cv.extent.tile<TS>(),
// [=](concurrency::tiled_index<TS> t_idx) restrict(amp) {
// // Prepare shared tile
// int col = t_idx.local[0];
// // if (Uv[col] > FEASIBILITY_TOL + vmin) {
// int colGlobal = t_idx.global[0];
// tile_static double A[TK][TS];
// A[0][col] = xv(0, colGlobal);
// A[1][col] = xv(1, colGlobal);
// tile_static double Lu[TS];
// Lu[col] = Uv[colGlobal];
//
// // Local best cost
// int best_j = 0;
// double best_c = -1;
// double best_v = -FEASIBILITY_TOL;
//
// // Internal loop between pair of points
// for (int i = 0; i < n; i += TS) {
// tile_static double B[TK][TS];
// B[0][col] = yv(0, i + col);
// B[1][col] = yv(1, i + col);
// tile_static double Lv[TS];
// Lv[col] = Vv[i + col];
//
// t_idx.barrier.wait();
//
// for (int j = 0; j < TS; ++j) {
// double violation = Lu[col] - Lv[j];
// if (violation > -best_v) {
// double c_ij = (A[0][col] - B[0][j]) * (A[0][col] - B[0][j]) +
// (A[1][col] - B[1][j]) * (A[1][col] - B[1][j]);
// // Lower precision, but faster speed using float instead of
// // double We do not use it for improving numerical stability
// /*concurrency::fast_math::pow(A[0][col] - B[0][j], 2) +
// concurrency::fast_math::pow(A[1][col] - B[1][j],
// 2);*/
// violation = c_ij - violation;
// if (violation < best_v) {
// best_v = violation;
// best_c = c_ij;
// best_j = i + j;
// }
// }
// }
//
// t_idx.barrier.wait();
// }
//
// // Store most violated cuts for element i
// cv[colGlobal].b = m + best_j;
// cv[colGlobal].c = best_c;
// //}
// });
//
// try {
// cv.synchronize();
// } catch (const Concurrency::accelerator_view_removed& e) {
// fprintf(stdout, "solveSeparationGPUTile: %s\n", e.what());
// }
//}
// Compute Kantorovich-Wasserstein distance between two measures
// Compute the Kantorovich-Wasserstein distance between Mu and Nu by
// solving the full dense transportation LP (all m*n arcs) with the
// network simplex.  `msg` labels the log line; `algo` is unused here.
void DenseTransportationLP(const MeasureR2& Mu, const MeasureR2& Nu, int algo,
                           const std::string& msg) {
  // Timinig output
  auto start = std::chrono::high_resolution_clock::now();

  int m = (int)Mu.size();
  int n = (int)Nu.size();

  typedef double CostType;
  typedef int64_t FlowType;

  // Build the graph for min cost flow: sources 0..m-1, sinks m..m+n-1
  DotSimplex<FlowType, CostType> simplex(n + m);

  // add first d source nodes
  for (int i = 0; i < m; ++i) simplex.addNode(i, +FlowType(Mu.getW(i)));
  for (int j = 0; j < n; ++j) simplex.addNode(m + j, -FlowType(Nu.getW(j)));

  // FIX: cast the factors before multiplying so m*n cannot overflow int
  simplex.resizeArcMemory(size_t(m) * size_t(n));

#pragma omp parallel
  {
    // BUG FIX: the arc id was computed as i * m + j, which produces
    // colliding ids when m < n and out-of-range ids when m > n (it was
    // only correct for square instances).  Row-major ids need stride n.
    // Also: schedule(static, m) handed all m outer iterations to a single
    // thread; a plain static schedule actually spreads the work.
#pragma omp for schedule(static)
    for (int i = 0; i < m; ++i)
      for (int j = 0; j < n; ++j)
        simplex.setArc(i * n + j, i, m + j,
                       DISTANCE_R2(Mu.getP(i), Nu.getP(j)));
  }

  //// Solve the problem to compute the distance
  DotSimplex<FlowType, CostType>::ProblemType status = simplex.run();

  switch (status) {
    case DotSimplex<>::INFEASIBLE:
      fprintf(stdout, "INFEASIBLE\n");
      break;
    case DotSimplex<>::OPTIMAL:
      break;
    case DotSimplex<>::UNBOUNDED:
      fprintf(stdout, "UNBOUNDED\n");
      break;
  }

  CostType fobj = simplex.totalCost();

  auto end = std::chrono::high_resolution_clock::now();
  double elapsed =
      double(std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
                 .count()) /
      1000;

  fprintf(stdout, "%s %d %d Runtime %.6f Value %.6f status %d RAM %.2f\n",
          msg.c_str(), n, simplex.num_arcs(), elapsed, fobj, status,
          getUsedRAM());
  fflush(stdout);
}
// Compute Kantorovich-Wasserstein distance between two measures
// Compute the Kantorovich-Wasserstein distance between Mu and Nu by
// column generation: alternately re-optimize the restricted master
// (network simplex) and price in violated arcs through the separation
// oracle (algo 0 = serial solveSeparation, 1 = OpenMP solveSeparationCore).
void ColumnGeneration(const MeasureR2& Mu, const MeasureR2& Nu, int algo,
                      const std::string& msg) {
  int m = (int)Mu.size();
  int n = (int)Nu.size();

  // Timinig output
  auto start = std::chrono::high_resolution_clock::now();
  auto end = std::chrono::high_resolution_clock::now();
  double elapsed = getMs(start, end);

  typedef double CostType;
  typedef int64_t FlowType;

  // Build the graph for min cost flow
  DotSimplex<FlowType, CostType> simplex(n + m);

  // add first d source nodes
  for (int i = 0; i < m; ++i) simplex.addNode(i, +FlowType(Mu.getW(i)));
  for (int j = 0; j < n; ++j) simplex.addNode(m + j, -FlowType(Nu.getW(j)));

  int it = 0;
  int n_cuts = 0;       // total number of arcs priced in
  CostType fobj = 0;
  double time_tot = 0;  // overall runtime
  double sep_tot = 0;   // time spent in the separation oracle
  double mas_tot = 0;   // time spent in the restricted master
  int cmp_tot = 0;      // distance evaluations (algo 0 only)

  vector<double> A(m, 0);  // duals of the source nodes
  vector<double> B(n, 0);  // duals of the sink nodes
  DOT::Vars vars(m);
  for (int i = 0; i < m; ++i) vars[i].a = i;
  DOT::Vars vnew;
  vnew.reserve(2 * size_t(m + n) + 1);

  // (Commented-out GPU separation support removed; see the
  //  solveSeparationGPU/solveSeparationGPUTile stubs above.)

  // Init the simplex
  simplex.run();

  // Start separation
  while (true) {
    start = std::chrono::high_resolution_clock::now();
    // FIX: the result was stored in a `status` variable that shadowed an
    // outer one and was never read; call reRun() directly.
    simplex.reRun();
    end = std::chrono::high_resolution_clock::now();
    elapsed = getMs(start, end);

    // Take the dual values
    for (int i = 0; i < m; ++i) A[i] = -simplex.potential(i);
    double umin = std::numeric_limits<double>::infinity();
    for (int j = 0; j < n; ++j) {
      B[j] = -simplex.potential(m + j);
      umin = std::min<double>(umin, B[j]);
    }
    mas_tot += elapsed;
    time_tot += elapsed;

    // Solve separation problem (with timing)
    auto sep_s = std::chrono::high_resolution_clock::now();
    if (algo == 0) cmp_tot += solveSeparation(Mu, A, Nu, B, vars, umin);
    if (algo == 1) solveSeparationCore(Mu, A, Nu, B, vars, umin);
    auto sep_e = std::chrono::high_resolution_clock::now();
    // BUG FIX: the separation time was computed as getMs(start, end),
    // i.e. the master re-optimization interval, so sep_tot never measured
    // the oracle and double-counted the master.
    auto sep_elapsed = getMs(sep_s, sep_e);
    sep_tot += sep_elapsed;
    time_tot += sep_elapsed;

    start = std::chrono::high_resolution_clock::now();
    vnew.clear();
    for (auto& v : vars) {
      if (v.c > -1) vnew.push_back(v);
      v.c = -1;  // reset so stale cuts are not re-added next round
    }
    if (vnew.empty()) break;  // no violated arc: current basis is optimal

    // Most expensive (largest-cost) cuts first
    std::sort(vnew.begin(), vnew.end(),
              [](const auto& v, const auto& w) { return v.c > w.c; });

    // Replace old constraints with new ones
    int new_arcs = simplex.updateArcs(vnew);
    end = std::chrono::high_resolution_clock::now();
    // BUG FIX: was `elapsed += getMs(start, end)`, which re-added the
    // master re-optimization time already accumulated above, inflating
    // mas_tot/time_tot every iteration.
    elapsed = getMs(start, end);
    mas_tot += elapsed;
    time_tot += elapsed;

    n_cuts += new_arcs;
    fobj = simplex.totalCost();
    ++it;
  }

  fobj = simplex.totalCost();
  simplex.checkFeasibility();

  fprintf(stdout,
          "%s %d it %d FinalTime %.4f SepTime %.4f Master %.4f Value %.6f "
          "AddedVars %d NumVars %d CmpTot %d CmpRatio %.2f RAM %.2f\n",
          msg.c_str(), algo, it, time_tot, sep_tot, mas_tot, fobj, n_cuts,
          simplex.num_arcs(), cmp_tot, double(cmp_tot) / double(n * m),
          getUsedRAM());
  fflush(stdout);
}
} // namespace R2
} // namespace DOT |
bitonic_sort.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <omp.h>
struct timeval st, et;
void swap(int*, int, int);
void rng(int*, int, int*);
void buildDummy(int*, int, int, int);
void impBitonicSortPar(int*, int, int);
void impBitonicSortSer(int*, int);
int getPowTwo(int);
void writeToFile(int*, int, char*);
int thr;
/*
 * Driver: generate random data padded to a power of two, sort it once
 * serially and once in parallel, report both times and the speedup, and
 * dump input/output under ./data/.
 */
int main(int argc, char **argv) {
    int n, dummy_n, threads, test, max_x;
    int* arr;
    int* arr2;

    thr = omp_get_max_threads();
    /* default: all effective threads */
    threads = omp_get_max_threads();

    if (argc < 2) {
        printf("Usage: %s <n> <p>\nwhere <n> is problem size, <p> is number of thread (optional)\n\n", argv[0]);
        exit(1);
    }
    if (argc == 3){
        /* use for custom threads number */
        threads = atoi(argv[2]);
    }

    /* true for test with 1,2,4,8,...,256 threads number */
    test = threads<=0;
    if(test){
        printf("test\n");
    }

    /* get problem size; reject non-positive values up front */
    n = atoi(argv[1]);
    if (n <= 0) {
        printf("Problem size must be positive\n");
        exit(1);
    }

    /* pad to the nearest power of two (bitonic sort requirement) */
    dummy_n = getPowTwo(n);

    /* prepare random numbers */
    arr = (int*) malloc(dummy_n*sizeof(int));
    if(!arr){
        printf("Unable to allocate memory\n");
        exit(1);
    }
    rng(arr,n,&max_x);
    buildDummy(arr,n,dummy_n,max_x);

    /* copy — FIX: this allocation was previously unchecked */
    arr2 = (int*) malloc(dummy_n*sizeof(int));
    if(!arr2){
        printf("Unable to allocate memory\n");
        free(arr);
        exit(1);
    }
    memcpy(arr2,arr,dummy_n*sizeof(int));

    /* write random numbers to input file */
    writeToFile(arr,n,"./data/input");

    /* execute serial */
    gettimeofday(&st,NULL);
    impBitonicSortSer(arr,dummy_n);
    gettimeofday(&et,NULL);
    int elapsed_serial = ((et.tv_sec - st.tv_sec) * 1000000) + (et.tv_usec - st.tv_usec);
    printf("Execution serial time: %d micro sec\n",elapsed_serial);

    /* execute paralel */
    gettimeofday(&st,NULL);
    impBitonicSortPar(arr2,dummy_n,threads);
    gettimeofday(&et,NULL);
    int elapsed_paralel = ((et.tv_sec - st.tv_sec) * 1000000) + (et.tv_usec - st.tv_usec);
    printf("Execution paralel time: %d micro sec\n",elapsed_paralel);

    /* calculate speedup */
    printf("Speedup : %.3f\n",(float)elapsed_serial/elapsed_paralel);

    writeToFile(arr,n,"./data/output");

    free(arr);
    free(arr2);
    return 0;
}
/*
 * Write the first n elements of arr to path, one decimal per line.
 * FIX: fopen was previously unchecked, dereferencing NULL when the
 * target directory (e.g. ./data) does not exist.
 */
void writeToFile(int* arr, int n, char* path){
    FILE* f = fopen(path,"w");
    if (!f) {
        fprintf(stderr, "Unable to open %s for writing\n", path);
        return;
    }
    for(int i=0; i<n; i++) {
        fprintf(f, "%d\n", arr[i]);
    }
    /* fclose flushes buffered output; report failure instead of losing data silently */
    if (fclose(f) != 0) {
        fprintf(stderr, "Error closing %s\n", path);
    }
}
/*
 * Fill arr[0..n-1] with pseudo-random values from a fixed seed (so every
 * run sorts the same data) and report the maximum through *max_x.
 * *max_x is left untouched when n <= 0.
 */
void rng(int* arr, int n, int* max_x) {
    srand(13515097); /* fixed seed: deterministic input */
    for (long idx = 0; idx < n; idx++) {
        arr[idx] = (int)rand();
        /* first element initializes the running maximum */
        if (idx == 0 || arr[idx] > *max_x)
            *max_x = arr[idx];
    }
}
/*
 * Pad positions [N, dummy_n) with the maximum value so the padding sorts
 * to the tail of the array and never displaces real data.
 */
void buildDummy(int* arr, int N, int dummy_n, int max_x){
    int* p = arr + N;
    int* const end = arr + dummy_n;
    while (p < end)
        *p++ = max_x;
}
/* Exchange a[i] and a[j]. */
void swap(int* a, int i, int j) {
    const int tmp = a[j];
    a[j] = a[i];
    a[i] = tmp;
}
/*
Imperative paralel bitonic sort
*/
/*
 * Imperative parallel bitonic sort (ascending).  n is expected to already
 * be a power of two (the caller pads with buildDummy); the inner
 * compare-exchange loop is parallelized with OpenMP.
 */
void impBitonicSortPar(int* a, int n, int threads) {
    /* smallest power of two >= n (inlined from getPowTwo; the caller
       already passes a power of two, so this is normally just n) */
    int size = 1;
    while (size > 0 && size < n) size <<= 1;

    for (int k = 2; k <= size; k <<= 1) {
        for (int j = k >> 1; j > 0; j >>= 1) {
            int i;
            #pragma omp parallel for num_threads(threads) private(i) shared(n,j,k)
            for (i = 0; i < n; i++) {
                const int partner = i ^ j;
                if (partner > i) {
                    /* bit k of i selects the direction of this subsequence */
                    const int ascending = ((i & k) == 0);
                    if (( ascending && a[i] > a[partner]) ||
                        (!ascending && a[i] < a[partner])) {
                        const int tmp = a[i];      /* inlined swap */
                        a[i] = a[partner];
                        a[partner] = tmp;
                    }
                }
            }
        }
    }
}
/*
Imperative serial bitonic sort
*/
/*
 * Imperative serial bitonic sort (ascending).  n is expected to already
 * be a power of two (the caller pads with buildDummy).
 */
void impBitonicSortSer(int* a, int n) {
    /* smallest power of two >= n (inlined from getPowTwo) */
    int size = 1;
    while (size > 0 && size < n) size <<= 1;

    for (int k = 2; k <= size; k <<= 1) {
        for (int j = k >> 1; j > 0; j >>= 1) {
            for (int i = 0; i < n; i++) {
                const int partner = i ^ j;
                if (partner > i) {
                    /* bit k of i selects the direction of this subsequence */
                    const int ascending = ((i & k) == 0);
                    if (( ascending && a[i] > a[partner]) ||
                        (!ascending && a[i] < a[partner])) {
                        const int tmp = a[i];      /* inlined swap */
                        a[i] = a[partner];
                        a[partner] = tmp;
                    }
                }
            }
        }
    }
}
/*
 * Smallest power of two >= n (returns 1 for n <= 1).
 * FIX: the previous `while (d>0 && d<n) d<<=1;` left-shifted a signed int
 * into the sign bit for n > 2^30, which is undefined behavior; the shift
 * is now done in unsigned arithmetic.
 */
int getPowTwo(int n){
    if (n <= 1) return 1;
    unsigned d = 1;
    while (d < (unsigned)n) d <<= 1;
    return (int)d;  /* values above 2^30 still do not fit an int — as before */
}
|
_ex1.c | void ex1(int n, double ss, double* a, double* b, double* y) {
register int i;
#pragma Orio Loops(transform Pragma(pragma_str="omp parallel for"))
{
#pragma omp parallel for
for (i=0; i<n; i++ ) {
y[i]=b[i]+ss*a[i];
}
}
#pragma Oiro
}
|
F-type_fermi_dirac.c | /*
A. Odrzywolek, AOdrzywolek
*/
#include "../fermidirac.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <stdio.h>
#include <float.h>
/* Functions below are integrated with so-called DoubleExponential or Tanh-Sinh quadrature.
*
* Some references:
*
* Mori, Masatake (2005), "Discovery of the double exponential transformation and its developments",
* Publications of the Research Institute for Mathematical Sciences 41 (4): 897–935,
* doi:10.2977/prims/1145474600,
* ISSN 0034-5318
* http://www.kurims.kyoto-u.ac.jp/~okamoto/paper/Publ_RIMS_DE/41-4-38.pdf, eq. (4.17)
*
* See also: http://en.wikipedia.org/wiki/Tanh-sinh_quadrature and references therein.
*
*/
/*
SECTION FOR RELATIVISTIC Fermi-Dirac integrals (F-function)
*/
double integrandF(const double t, const double k, const double eta, const double theta)
{
double x,dx,integrand,result,factor;
/* if(t>-6.5)
*
* this is min t=-9.3, for which exp(t-exp(-t))
is still smaller than LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but
using proper (unsafe?) coding modern CPU's do calculations internally in long double format anyway.
NOTE: obsolete, see comment below where optimal coding for integrand is described.
*/
x = exp( t - exp(-t) ); /* Masatake Mori, eq. (4.17) */
//if( (eta>k) && (k>0) ) x = eta*exp(t-exp(-t)); else x = exp(t-exp(-t));
//dx = x*(1 + exp(-t) ); /* dx/dt */
dx = 1.0+exp(-t); /* in this case x is adsorbed in integrand, and x^k -> x^(k+1) */
if(x-eta<-log(DBL_EPSILON)) // if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal
{
factor = 1.0/(1.0+exp(x-eta) );
integrand = exp( (k+1.0)*(t - exp(-t)) );
//integrand = pow(x,k+1.0);
integrand = integrand*sqrt(1.0+0.5*theta*x)*factor;
}
else
{
//factor = exp(eta-x) adsorbed into exp, to avoid 0*infinity mess
integrand = exp((k+1.0)*(t - exp(-t)) + eta - x );
integrand = integrand*sqrt(1.0+ 0.5*theta*x);
}
/* NOTE:
*
* if we use:
*
* integrand = pow(x,k+1.0)*sqrt(1.0+ 0.5*theta*x)*factor;
*
* then:
*
* a) precision is lost, beacuse x is double, while exp((k+1.0)*(t - exp(-t)) )
* is internally handled as long double (96 bit)
* b) if k<0 we lost advantage of postponed underflow ( k+1 << 1 in such a case )
*
*/
#if DEBUG
printf("DEBUG300: factor = %.20Lf, x=%.20Lf, dx=%.20Lf, integrand=%.20Lf, return = %.20Lf \t test= %.20Lf \n",factor, x,dx,integrand, (integrand*dx),test);
#endif
result = integrand*dx;
return result;
}
long double integrandF_long(const long double t, const long double k, const long double eta, const long double theta)
{
long double x,dx,integrand,result,factor;
//const double lambda = M_E*100.5;//scaling factor
/* if(t>-6.5)
*
* this is min t=-9.3, for which exp(t-exp(-t))
is still smaller than LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but
using proper (unsafe?) coding modern CPU's do calculations internally in long double format anyway.
NOTE: obsolete, see comment below where optimal coding for integrand is described.
*/
x = expl( t - expl(-t) ); /* Masatake Mori, eq. (4.17) */
//dx = x*(1 + exp(-t) ); /* dx/dt */
dx = 1.0L+exp(-t); /* in this case x is adsorbed in integrand, and x^k -> x^(k+1) */
if(x-eta<-logl(LDBL_EPSILON)) // if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal
{
factor = 1.0L/(1.0L+expl(x-eta) );
//integrand = expl( (kL+1.0L)*(tL - expl(-tL)) );
integrand = powl(x,k+1.0L);
integrand = integrand*sqrtl(1.0L+0.5L*theta*x)*factor;
}
else
{
//factor = exp(eta-x) adsorbed into exp, to avoid 0*infinity mess
integrand = expl((k+1.0L)*(t - expl(-t)) + eta - x );
integrand = integrand*sqrtl(1.0L+ 0.5L*theta*x);
}
result = integrand*dx;
return result;
}
/*
 * One refinement level of the trapezoidal sum for the double-exponential
 * quadrature of integrandF with step h.
 *
 * last_result < 0 signals the first level: every abscissa h*i is summed
 * (step 1) and the t=0 term is included.  Otherwise last_result is the
 * estimate from step 2h, and only the new midpoints (odd i, step 2) are
 * added, so each refinement costs as much as the previous one.
 *
 * Both tails are summed until adding a term no longer changes the sum
 * (floating-point fixed point), which also terminates once the integrand
 * underflows.  Optional Kahan compensation is compiled in with KAHAN.
 */
double Ffermi_estimate(double h, double last_result, double k, double eta, double theta)
{
  int step,i;
  double sum_Left_old, sum_Right_old;
  double sum_Left_new, sum_Right_new;
  double old_result, new_result;
#if KAHAN
  double c=0.0,t,y; // https://en.wikipedia.org/wiki/Kahan_summation_algorithm
#endif

  if(last_result<0.0) /* Negative value means first iteration*/
  {
    step=1;
    /* trapezoid contribution of the central (t=0) node */
    old_result = 2.0*h*integrandF(0.0, k, eta, theta);
  }
  else
  {
    step=2;
    old_result = last_result;
  }

#if DEBUG
  printf("DEBUG2: old=%e,\tlast=%e\n",old_result,last_result);
#endif

  /* integral for 0 < t < Infinity */
  sum_Right_old = 0.0;
  sum_Right_new = 0.0;
  i=1;
  /* possible vectorization, but loop step must be known at compile time!
  #pragma omp simd
  #pragma ivdep
  for(i=1;i<=16;i+=2)
  {
    sum_Right_new += integrandF(h*i, k, eta, theta);
  }
  */
  do
  {
    sum_Right_old = sum_Right_new;
#if KAHAN
    /* compensated summation: y is the corrected term, c the running error */
    y = integrandF(h*i, k, eta, theta) - c;
    t = sum_Right_new + y;
    c = (t-sum_Right_new) - y;
    sum_Right_new = t;
#else
    sum_Right_new = sum_Right_old + integrandF(h*i, k, eta, theta);
    //sum_Right_new = sum_Right_old + integrandF(h*i, k, eta, theta);
#endif
    i = i + step;
  }
  while ( sum_Right_old<sum_Right_new ); //floating point fixed-point method

  /* integral for -Infinity < t <0 */
  sum_Left_old = 0.0;
  sum_Left_new = 0.0;
#if KAHAN
  c = 0.0;
#endif
  i=-1;
  do
  {
    sum_Left_old = sum_Left_new;
#if KAHAN
    y = integrandF(h*i, k, eta, theta) - c;
    t = sum_Left_new + y;
    c = (t-sum_Left_new) - y;
    sum_Left_new = t;
#else
    sum_Left_new = sum_Left_old + integrandF(h*i, k, eta, theta);
#endif
    i = i - step;
  }
  while (sum_Left_old<sum_Left_new);

  /* Richardson-style combination: new nodes plus half of the coarser sum */
  new_result = h*(sum_Left_new + sum_Right_new) + 0.5*old_result;

  return new_result;
}
/*
 * Long-double counterpart of Ffermi_estimate: one refinement level of the
 * trapezoidal double-exponential sum with step h.  last_result < 0 marks
 * the first level (step 1, including the t=0 node); otherwise only the
 * new midpoints (odd i, step 2) are added to the coarser estimate.
 * Tail sums stop at the floating-point fixed point; no Kahan variant here.
 */
long double Ffermi_estimate_long(long double h, long double last_result, long double k, long double eta, long double theta)
{
  int step,i;
  long double sum_Left_old, sum_Right_old;
  long double sum_Left_new, sum_Right_new;
  long double old_result, new_result;

  if(last_result<0.0L) /* Negative value means first iteration*/
  {
    step=1;
    /* trapezoid contribution of the central (t=0) node */
    old_result = 2.0L*h*integrandF_long(0.0L, k, eta, theta);
  }
  else
  {
    step=2;
    old_result = last_result;
  }

  /* integral for 0 < t < Infinity */
  sum_Right_old = 0.0;
  sum_Right_new = 0.0;
  i=1;
  do
  {
    sum_Right_old = sum_Right_new;
    sum_Right_new = sum_Right_old + integrandF_long(h*i, k, eta, theta);
    i = i + step;
  }
  while ( sum_Right_old<sum_Right_new ); //floating point fixed-point method

  /* integral for -Infinity < t <0 */
  sum_Left_old = 0.0;
  sum_Left_new = 0.0;
  i=-1;
  do
  {
    sum_Left_old = sum_Left_new;
    sum_Left_new = sum_Left_old + integrandF_long(h*i, k, eta, theta);
    i = i - step;
  }
  while (sum_Left_old<sum_Left_new);

  /* combine new nodes with half of the coarser-level sum */
  new_result = h*(sum_Left_new + sum_Right_new) + 0.5L*old_result;

  return new_result;
}
/*
 * Relativistic Fermi-Dirac integral F(k, eta, theta) by double-exponential
 * quadrature: the step h is halved (reusing previous nodes through
 * Ffermi_estimate) until the relative change drops below `precision` or
 * h reaches 2^-recursion_limit.  Returns NaN for k <= -1, where the
 * integral diverges.
 */
double Ffermi_value(const double k, const double eta, const double theta,
const double precision, const int recursion_limit)
{
  double prev = 0.0, curr = 0.0, h = 0.5;

  if (k <= -1.0) return nan("NaN"); /* not converging for k <= -1 */

#if DEBUG
  printf("DEBUG0: h=%lf,\tval=%e\n", h, curr);
#endif

  /* first level: negative last_result triggers the full step-1 sum */
  curr = Ffermi_estimate(h, -1.0, k, eta, theta);
#if DEBUG
  printf("DEBUG1: h=%lf,\tval=%e\n", h, curr);
#endif

  while (fabs(prev - curr) > precision * fabs(curr)
         && h > pow(2.0, -recursion_limit))
  {
    prev = curr;
    h = 0.5 * h;
    curr = Ffermi_estimate(h, prev, k, eta, theta);
#if DEBUG
    printf("DEBUG4: h=%lf,\tval=%e\n", h, curr);
#endif
  }

  return curr;
}
/* Adaptive evaluation of the generalized Fermi-Dirac integral in extended
 * (long double) precision via double-exponential quadrature.
 *
 * k               index of the integral (must satisfy k > -1)
 * eta             degeneracy parameter
 * theta           relativity parameter
 * precision       requested relative precision of the result
 * recursion_limit maximum number of step halvings (h >= 2^-recursion_limit)
 *
 * Returns NaN (long double) for k <= -1, where the integral diverges.
 */
long double Ffermi_dblexp_long(const long double k, const long double eta, const long double theta, const long double precision, const int recursion_limit)
{
long double prev=0.0L, curr=0.0L, h=0.5L;
/* Fix: use nanl() so the NaN is produced directly in long double;
 * the original nan() returned a double that was then widened. */
if(k<=-1.0L) return nanl("NaN"); /* not converging for k <= -1 */
prev = 0.0L;
curr = Ffermi_estimate_long(h, -1.0L, k, eta, theta); /* -1.0L marks the first pass */
while( fabsl(prev-curr)>precision*fabsl(curr) && h>powl(2.0L,-recursion_limit))
{
prev=curr;
h=0.5L*h;
curr = Ffermi_estimate_long(h, prev, k, eta, theta);
}
return curr;
}
/* TODO: error control not implemented ! ('precision' is currently unused) */
/*
 * Sommerfeld (large-eta) asymptotic expansion of the generalized
 * Fermi-Dirac integral.
 *
 * k                index of the integral
 * eta              degeneracy parameter; expansion is asymptotic for large eta
 * theta            relativity parameter
 * precision        unused here (see TODO above); forwarded nowhere
 * SERIES_TERMS_MAX number of asymptotic correction terms:
 *                  0 returns only the leading term, 1 adds the closed-form
 *                  first correction, otherwise the generic term loop runs.
 */
double Ffermi_sommerfeld(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX)
{
double leading_term, derivative,asymptotic_terms=0.0;
int i,j;
/* Precomputed Dirichlet eta function values eta(n) for n = 0..11;
 * the loop below only reads the even-index entries etaTBL[2*i]. */
const double etaTBL[12] = {0.50000000000000000000000000000000, \
0.69314718055994530941723212145818, \
0.82246703342411321823620758332301, \
0.90154267736969571404980362113359, \
0.94703282949724591757650323447352, \
0.97211977044690930593565514355347, \
0.98555109129743510409843924448495, \
0.99259381992283028267042571313339, \
0.99623300185264789922728926008280, \
0.99809429754160533076778303185260, \
0.99903950759827156563922184569934, \
0.99951714349806075414409417482869};
/* Leading Sommerfeld term; sommerfeld_leading_term presumably evaluates the
 * same 2F1 hypergeometric factor as the commented-out hyp2f1 call -- confirm. */
//leading_term = pow(eta,1.0+k)/(1.0+k)*hyp2f1(-0.5,1.0+k,2.0+k,-0.5*eta*theta);
leading_term = pow(eta,1.0+k)/(1.0+k)*sommerfeld_leading_term(k,-0.5*eta*theta);
if(SERIES_TERMS_MAX==0) return leading_term;
if(SERIES_TERMS_MAX==1) return leading_term
+ M_PI*M_PI/6.0*(pow(eta,k)*theta/4.0/sqrt(1.0+theta*eta/2.0)+k*pow(eta,k-1.0)*sqrt(1.0+theta*eta/2.0));
for(i=1;i<=SERIES_TERMS_MAX;i++)
{
/* (2i-1)-th derivative factor of the integrand at eta, built term by
 * term via the generalized Leibniz rule (binom weights, gamma ratios). */
derivative = 0.0;
for(j=0;j<=2*i-1;j++)
derivative = derivative + binom(2*i-1,j)*tgamma(1.5)*tgamma(1.0+k)/tgamma(1.5-j)/tgamma(2.0+k-2.0*i+j)
*pow(0.5*theta,j)*pow(1.0+0.5*theta*eta,0.5-j)*pow(eta,1.0-2.0*i+j+k);
/* etaTBL only covers 2*i <= 10 (i <= 5); beyond that compute eta(2i) numerically. */
if(i>5)
asymptotic_terms = asymptotic_terms + derivative*dirichlet_eta(2.0*i,DBL_EPSILON,64);
else
asymptotic_terms = asymptotic_terms + derivative*etaTBL[2*i];
}
return leading_term + 2.0*asymptotic_terms;
}
/* Alternating series expansion of the Fermi-Dirac integral for negative
 * eta.  Terms are accumulated until convergence (relative 'precision' if
 * positive, otherwise exact floating-point fixed point) or until
 * SERIES_TERMS_MAX terms have been added. */
double Ffermi_series_neg(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX)
{
double partial_prev=0.0, partial=0.0, term;
const double x = 2.0/theta;
int n=0;
do
{
n++;
partial_prev = partial;
term = exp(n*eta)*U(k,n*x);           /* common factor of both sign branches */
partial += ( n % 2 == 0 ) ? term : -term;
}
while( ( (precision>0) ? fabs(partial_prev-partial) >= precision*partial: partial_prev!=partial ) && n<SERIES_TERMS_MAX );
return -partial*tgamma(1.0+k)*pow(x,1.0+k);
}
/* Series in ascending powers of theta/2 built from complete Fermi-Dirac
 * integrals, suited to small theta.  binom12[] comes from "factorial.h"
 * and presumably holds the binomial coefficients of the sqrt expansion
 * -- confirm against that header. */
double Ffermi_series_sqrt_a(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX)
{
#include "factorial.h"
double prev=0.0, acc=0.0;
int n=0;
do
{
prev = acc;
acc += Ffermi_complete(k+n,eta)*pow(0.5*theta,n)*binom12[n];
n++;
}
while( ( (precision>0) ? fabs(prev-acc) >= precision*acc: prev!=acc ) && n<SERIES_TERMS_MAX );
return acc;
}
/* Series in descending half-integer indices k+1/2-n of the complete
 * Fermi-Dirac integral.  Stops after SERIES_TERMS_MAX terms or as soon as
 * the index would drop to -1 or below, where the integral is undefined.
 * Note: 'precision' is not used by this variant. */
double Ffermi_series_sqrt_b(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX)
{
#include "factorial.h"
double acc=0.0;
int n;
for(n=0; n<SERIES_TERMS_MAX; n++)
{
if( !(k+0.5-n>-1.0) ) break;   /* index of complete integral must stay > -1 */
acc += Ffermi_complete(k-n+0.5,eta)*pow(0.5*theta,0.5-n)*binom12[n];
}
return acc;
}
/* Top-level dispatcher for the generalized Fermi-Dirac integral
 * F_k(eta, theta): routes each parameter region to the method best suited
 * to it, falling back to adaptive double-exponential quadrature. */
double Ffermi(const double k, const double eta, const double theta)
{
#if 0
if( fmax(1.0+k-log(DBL_EPSILON),eta+1.0+k-log(DBL_EPSILON))*theta<sqrt(DBL_EPSILON) )
{
/* special case for tiny theta relative to 1 and eta */
printf("SPECIAL\t");
return Ffermi_series_sqrt(k, eta, theta);
}
#endif
/* Strongly degenerate regime: Sommerfeld asymptotic expansion. */
if( eta>56000.0)
return Ffermi_sommerfeld(k, eta, theta, DBL_EPSILON, 32);
/* Non-degenerate, large index, relativistic: alternating series. */
if( (eta<0.0) && (k>25.0) && (theta>=1.0) )
return Ffermi_series_neg(k, eta, theta, DBL_EPSILON, 32);
/* Everything else: adaptive quadrature at the library precision goal. */
return Ffermi_value(k,eta,theta,PRECISION_GOAL, MAX_REFINE);
}
/* Long double front end: evaluates F_k(eta, theta) by double-exponential
 * quadrature using the library-wide PRECISION_GOAL and MAX_REFINE limits. */
long double Ffermi_long(const long double k, const long double eta, const long double theta)
{
return Ffermi_dblexp_long(k,eta,theta,PRECISION_GOAL, MAX_REFINE);
}
/*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
#define MAX_RANK 32
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif
#ifdef __CUDACC__
#define INLINEDEF inline
#else
#define INLINEDEF inline
#endif
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
namespace shape {
/**
 * Shape information approximating
 * the information on an ndarray.
 * Plain aggregate; it does NOT own the shape/stride buffers it points to
 * -- lifetime is managed by the caller.
 */
struct ND4J_EXPORT ShapeInformation {
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
Nd4jLong *shape;              // extent of each dimension (rank entries)
Nd4jLong *stride;             // per-dimension element stride (rank entries)
char order;                   // storage order; presumably 'c' or 'f' -- confirm against getOrder()
int rank;                     // number of dimensions
int offset;                   // offset of the first element into the backing buffer
int elementWiseStride;        // uniform stride when one exists; see computeElementWiseStride
};
/**
 * Indexing information
 * for bounds checking:
 * per-thread work-partitioning bounds for a kernel invocation.
 */
struct ND4J_EXPORT CurrentIndexing {
int numElementsPerThread;     // number of elements each thread processes
int blockStartingIndex;       // first linear index assigned to this block
int startingThreadIndex;      // first linear index assigned to this thread
int endingThreadIndex;        // ending index for this thread; inclusivity not evident here -- confirm at use sites
};
ND4J_EXPORT _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
ND4J_EXPORT _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD int sizeAt(Nd4jLong *shape, int dim);
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
ND4J_EXPORT _CUDA_HD void traceNew(int id);
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder, Nd4jLong* target);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output);
//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);
#ifdef __CUDACC__
template <typename T>
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return -1 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return -1 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
* which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank( Nd4jLong *buffer);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based.
* 1 padding is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and two with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
/**
 * Converts an nd index to the equivalent linear index for the given shape
 * (column-major accumulation; see implementation below).
 */
ND4J_EXPORT _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices);
/**
 * Compute the real linear indices for the given shape and stride.
 * Returns a newly allocated table of offsets; caller owns it.
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
 * Compute the real linear indices for the
 * given shape buffer. Shape, stride and rank are derived
 * from the buffer.
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
/**
 * Convert a linear index to the equivalent nd index, honoring the order
 * ('c' or 'f') stored in shapeInfo.
 * @param shapeInfo the full shape information buffer
 * @param index the linear index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @param out receives the mapped index along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
 * Convert a linear index to the equivalent nd index, honoring the order
 * stored in shapeInfo; the total index count is derived from shapeInfo.
 * @param shapeInfo the full shape information buffer
 * @param index the linear index to map
 * @param out receives the mapped index along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out);
// Debug printers for shape information buffers and raw arrays.
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(Nd4jLong *arr,int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
// Builders for shape buffers describing NumPy (.npy) arrays.
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also sort input array of dimensions, this operation is also necessary for creating TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// return absolute index of array min, min is sub-array of max, index to be returned is min index and corresponds to maxIdx of max array
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx);
// Fill caller-provided buffers with canonical scalar / vector shape infos.
ND4J_EXPORT _CUDA_HD void shapeScalar(Nd4jLong* const buffer);
ND4J_EXPORT _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer);
ND4J_EXPORT _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
/**
 * Device-side scratch allocator for coordinate buffers.
 * Small requests (size <= MAX_COORD * 4) get a fresh device-heap allocation;
 * larger ones are served from this thread's slot in the preallocated buffer,
 * falling back to device malloc when the slot would run past PREALLOC_SIZE.
 * NOTE(review): the new[] / malloc paths are never freed here — confirm the
 * caller owns and releases those allocations.
 */
template <typename T>
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
    // if we go for 3 dimensions coord space or below - just use shared memory for that
    if (size <= MAX_COORD * 4) {
        Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
        return ptr;
    } else {
        // otherwise go to preallocated global memory :(
        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        if (tid * size > PREALLOC_SIZE - size) {
            // preallocated pool exhausted for this thread — device-heap fallback
            return (Nd4jLong *) malloc(size);
        } else {
            Nd4jLong *ret = buffer;
            ret += (tid * size);
            return ret;
        }
    }
}
#endif
#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECKS ALLOCATION BOUNDARIES
 * Returns this thread's slot (threadIdx.x * size) inside a caller-supplied
 * preallocated buffer; no allocation is performed and no bounds are checked.
 */
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
    Nd4jLong *ret = buffer;
    ret += (threadIdx.x * size);
    return ret;
}
#endif
/**
 * Returns the number of elements in a single TAD (tensor along dimension)
 * of the array described by shapeInfo, taken over the given dimensions.
 */
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    // Single dimension: the TAD length is simply that axis' extent.
    if (dimensionLength == 1)
        return shape::shapeOf(shapeInfo)[dimension[0]];

    // General case: multiply the extents of every axis listed in dimension.
    int result = 1;
    for (int axis = 0; axis < shape::rank(shapeInfo); axis++) {
        for (int d = 0; d < dimensionLength; d++) {
            if (axis == dimension[d])
                result *= shape::shapeOf(shapeInfo)[dimension[d]];
        }
    }
    return result;
}
/**
* Tad element wise stride:
* given the inner most dimension (the sorted dimension of the last)
* the element wise stride of the tad (disregarding order) is the
* last dimension's stride.
*
* For a given singular dimension this will just be the only entry.
* For example, given the following c order shape/stride:
* 2,2,3,2
* 12,6,2,1
*
* The tad element wise stride for 3 will be 1.
* For zero it wil be 12
*
* For 2,3 it's 1
*
* Note here that the multi dimensional 2,3 case
* is equivalent to the singular 3 case.
*
*
* Note that this is for the dimension that ultimately
* ends up removed.
*
* Again: this may not preserve ordering of the tad
* but maybe used for reductions.
*/
// The order-agnostic TAD element-wise stride is exactly the reduction-index
// element-wise stride, so this simply delegates.
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    return reductionIndexElementWiseStride(shapeInfo, dimension, dimensionLength);
}
// True when the two shapes have the same rank and identical extents.
INLINEDEF _CUDA_HD bool shapeEquals(int shape1Rank, Nd4jLong *shape1, int shape2Rank, Nd4jLong *shape2) {
    // Different ranks can never match.
    if (shape1Rank != shape2Rank)
        return false;
    // Compare axis by axis, bailing out on the first mismatch.
    for (int axis = 0; axis < shape1Rank; axis++)
        if (shape1[axis] != shape2[axis])
            return false;
    return true;
}
// Convenience overload comparing the shapes embedded in two shapeInfo buffers.
INLINEDEF _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1, Nd4jLong *shapeInfo2) {
    auto rank1 = shape::rank(shapeInfo1);
    auto rank2 = shape::rank(shapeInfo2);
    return shape::shapeEquals(rank1, shape::shapeOf(shapeInfo1), rank2, shape::shapeOf(shapeInfo2));
}
// True when the two stride arrays have the same rank and identical entries.
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank, Nd4jLong *shape1, int shape2Rank, Nd4jLong *shape2) {
    if (shape1Rank != shape2Rank)
        return false;
    // Element-wise comparison with early exit.
    for (int axis = 0; axis < shape1Rank; axis++)
        if (shape1[axis] != shape2[axis])
            return false;
    return true;
}
// Convenience overload comparing the strides embedded in two shapeInfo buffers.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1, Nd4jLong *shapeInfo2) {
    auto rank1 = shape::rank(shapeInfo1);
    auto rank2 = shape::rank(shapeInfo2);
    return shape::strideEquals(rank1, shape::stride(shapeInfo1), rank2, shape::stride(shapeInfo2));
}
// True when two raw stride arrays match in rank and content.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1, int rank1, Nd4jLong *stride2, int rank2) {
    if (rank1 != rank2)
        return false;
    for (int axis = 0; axis < rank1; axis++)
        if (stride1[axis] != stride2[axis])
            return false;
    return true;
}
/**
 * Computes the shapeInfo buffer resulting from reducing originalShapeBuffer
 * over the given dimensions. The result is always padded to at least rank 2
 * (row/column vector or 1x1 scalar). Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
    Nd4jLong *retShape;
    int retShapeLength;
    // 2147483647 (INT_MAX) is the sentinel for "reduce over all dimensions":
    // the result collapses to a 1x1 scalar shape.
    if(dimensionLength == 1 && dimension[0] == 2147483647) {
        retShape = new Nd4jLong[2];
        retShape[0] = 1;
        retShape[1] = 1;
        retShapeLength = 2;
    }
    else {
        // Drop the reduced axes from the original shape.
        retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
        retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
    }
    //ensure vector is proper shape: pad rank-1 results to a 2d row/column vector
    if (retShapeLength == 1) {
        if (dimension[0] == 0) {
            // reduced over axis 0 -> result is a row vector {1, n}
            auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
        else {
            // otherwise a column vector {n, 1}
            auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
    } else if (retShapeLength == 0) {
        // everything reduced away: represent the scalar as 1x1
        auto newRetShape = new Nd4jLong[2]{1, 1};
        delete[] retShape;
        retShape = newRetShape;
        retShapeLength = 2;
    }
    auto ret = shape::shapeBuffer(retShapeLength,retShape);
    delete[] retShape;
    return ret;
}
/**
 * Fills buffer with a shapeInfo whose shape/stride sections are the selected
 * dimensions of shapeInfo (padded to rank 2 for a single dimension), and
 * copies the source buffer's order flag. Returns buffer.
 *
 * NOTE: the original implementation had two byte-identical branches for
 * dimension[0] == 0 vs. the else case inside the isMatrix path; they have
 * been collapsed into one (behavior unchanged).
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride = shape::stride(shapeInfo);
    // A single dimension still produces a rank-2 (vector-shaped) result.
    int rank = dimensionLength == 1 ? 2 : dimensionLength;
    Nd4jLong *ret = buffer;
    //set the rank
    ret[0] = rank;
    Nd4jLong *retShape = shape::shapeOf(ret);
    Nd4jLong *retStride = shape::stride(ret);
    int len = rank;
    if(dimensionLength == 1) {
        if(shape::isMatrix(theShape,shape::rank(shapeInfo))) {
            // column-vector layout {d, 1} (both former branches wrote this)
            retShape[0] = theShape[dimension[0]];
            retShape[1] = 1;
            retStride[0] = theStride[dimension[0]];
            retStride[1] = 1;
        }
        else {
            // row-vector layout {1, d}
            retShape[0] = 1;
            retShape[1] = theShape[dimension[0]];
            retStride[0] = 1;
            retStride[1] = theStride[dimension[0]];
        }
    }
    else {
        // Gather the selected axes' extents and strides, optionally copying
        // the strides in reverse order.
        Nd4jLong *newIndexes = dimension;
        if(reverseCopyStride)
            shape::reverseCopyTo(theStride, retStride, newIndexes, len);
        else
            shape::copyTo(len, theStride, retStride, newIndexes);
        shape::copyTo(len, theShape, retShape, newIndexes);
    }
    // Preserve the source buffer's order flag.
    ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
    return ret;
}
// Allocating overload: creates a fresh buffer of the proper length and
// delegates to the buffer-based variant. Caller owns the result.
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
    int outRank = dimensionLength == 1 ? 2 : dimensionLength;
    traceNew(4);
    auto out = new Nd4jLong[shape::shapeInfoLength(outRank)];
    return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, out);
}
// Allocating overload of createShapeInfo; caller owns the returned buffer.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
    traceNew(5);
    auto buffer = new Nd4jLong[shape::shapeInfoLength(rank)];
    return createShapeInfo(shape, stride, rank, buffer);
}
// Writes rank followed by copies of the shape and stride arrays into buffer
// and returns it; the trailing shapeInfo fields are left untouched.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
    buffer[0] = rank;
    Nd4jLong *dstShape = shape::shapeOf(buffer);
    Nd4jLong *dstStride = shape::stride(buffer);
    for (int axis = 0; axis < rank; axis++) {
        dstShape[axis] = shape[axis];
        dstStride[axis] = stride[axis];
    }
    return buffer;
}
/**
 * Computes the standard packed (Fortran/column-major) strides for a shape.
 *
 * @param shape the shape of a matrix
 * @param rank number of dimensions in shape
 * @param startNum the start number for the strides
 * @return newly allocated strides for a matrix of n dimensions (caller owns)
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
    // Vectors always get the canonical {1, 1} stride pair.
    if (isVector(shape, rank)) {
        traceNew(5);
        Nd4jLong *ret = new Nd4jLong[2];
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    traceNew(6);
    Nd4jLong *stride = new Nd4jLong[rank];
    // 64-bit accumulator: the running product overflowed the original's
    // `int st` for large shapes even though strides are stored as Nd4jLong.
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * In-place variant of calcStridesFortran writing into caller-provided ret.
 * Returns ret.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
    // Vectors always get the canonical {1, 1} stride pair.
    if (isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    // 64-bit accumulator: the original's `int st` overflowed for large shapes.
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Computes the standard packed (C/row-major) strides for a given shape.
 *
 * @param shape the shape of a matrix
 * @param rank number of dimensions in shape
 * @param startNum the start number for the strides
 * @return newly allocated strides for a matrix of n dimensions (caller owns)
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
    traceNew(7);
    Nd4jLong *stride = new Nd4jLong[rank];
    // Rank-1 arrays get the trivial unit stride.
    if (rank == 1) {
        stride[0] = 1;
        return stride;
    }
    // Vectors get the canonical {1, 1} stride pair.
    if (shape::isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            stride[i] = 1;
        return stride;
    }
    // 64-bit accumulator: the running product overflowed the original's
    // `int st` for large shapes even though strides are stored as Nd4jLong.
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * In-place variant of calcStrides writing into caller-provided ret.
 * Returns ret.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
    if (rank == 1) {
        ret[0] = 1;
        return ret;
    }
    if (shape::isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    // 64-bit accumulator: the original's `int st` overflowed for large shapes.
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Fortran-order strides with the default start number of 1.
 * Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
    return calcStridesFortran(shape, rank, 1);
}
// In-place Fortran-order strides with the default start number of 1.
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStridesFortran(shape, rank, 1, ret);
}
/**
 * C-order strides with the default start number of 1.
 * Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
    return calcStrides(shape, rank, 1);
}
// In-place C-order strides with the default start number of 1.
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStrides(shape, rank, 1, ret);
}
/**
 * Recomputes the stride section of a packed shapeInfo buffer in place for
 * the given order ('c' or 'f'), then resets the trailing fields.
 * Buffer layout used here: [rank, shape[rank], stride[rank], offset, ews, order].
 */
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shape, const char order) {
    int rank = shape[0];
    // Index doubleRank is the last stride slot; rank+1 is the first one.
    int doubleRank = 2*rank;
    if (rank > 0)
        if(order == 'c') {
            shape[doubleRank] = 1; // set unity as last stride for c order
            // Walk backwards: each stride is the next stride times the
            // corresponding shape extent.
            for(int j=1; j<rank; ++j)
                shape[doubleRank-j] = shape[doubleRank-j+1]*shape[rank+1-j];
        }
        else {
            shape[rank+1] = 1; // set unity as first stride for f order
            // Walk forwards: each stride is the previous stride times the
            // corresponding shape extent.
            for(int j=rank+1; j<doubleRank; ++j)
                shape[j+1] = shape[j]*shape[j-rank];
        }
    // set last 3 elements in shape: offset = 0, element-wise stride = 1,
    // and the order character.
    shape[doubleRank + 1] = 0;
    shape[doubleRank + 2] = 1;
    shape[doubleRank + 3] = (int)order;
}
// Returns true when the dimension list is not in ascending order, i.e. the
// dimensions are permuted; a non-permuted list must be ordered 0,...,rank-1.
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
    for (Nd4jLong i = 0; i + 1 < dimSize; ++i) {
        if (dimensions[i] > dimensions[i + 1])
            return true;
    }
    return false;
}
/**
 * @param toCopy the shape to copy
 * @return a deep copy of the original struct, including freshly allocated
 *         shape and stride arrays. Caller owns the copy.
 */
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
    auto copy = new ShapeInformation;
    traceNew(8);
    copy->shape = new Nd4jLong[toCopy->rank];
    memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));
    traceNew(9);
    copy->stride = new Nd4jLong[toCopy->rank];
    memcpy(copy->stride, toCopy->stride, toCopy->rank * sizeof(Nd4jLong));
    // Scalar fields copy directly.
    copy->order = toCopy->order;
    copy->rank = toCopy->rank;
    copy->offset = toCopy->offset;
    copy->elementWiseStride = toCopy->elementWiseStride;
    return copy;
}
/**
 * Determines whether the array (shape, stride) can be traversed with a single
 * element-wise stride, and returns that stride; returns -1 when the layout is
 * not contiguous enough. Works by attempting to reshape to {1, length} while
 * preserving the memory layout — presumably adapted from NumPy's
 * reshape/stride-compatibility logic (TODO confirm provenance).
 */
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
    if (rank == 0)
        return 1;
    // Vectors are trivially traversable with their last stride.
    if(shape::isVector(shape,rank)) {
        return stride[rank - 1];
    }
    else {
        int oldnd;
        // Working copies; both are freed on every exit path below.
        Nd4jLong *olddims = shape::copyOf(rank, shape);
        Nd4jLong *oldstrides = shape::copyOf(rank, stride);
        int np, op, last_stride;
        int oi, oj, ok, ni, nj, nk;
        traceNew(10);
        auto newStrides = new Nd4jLong[rank];
        oldnd = 0;
        //set the shape to be 1 x length
        int newShapeRank = 2;
        auto newShape = new Nd4jLong[newShapeRank];
        newShape[0] = 1;
        newShape[1] = shape::prodLong(shape, rank);
        /*
         * Remove axes with dimension 1 from the old array. They have no effect
         * but would need special cases since their strides do not matter.
         */
        for (oi = 0; oi < rank; oi++) {
            if (shape[oi] != 1) {
                olddims[oldnd] = shape[oi];
                oldstrides[oldnd] = stride[oi];
                oldnd++;
            }
        }
        // Total element counts of the new and old views must agree.
        np = 1;
        for (ni = 0; ni < newShapeRank; ni++) {
            np *= newShape[ni];
        }
        op = 1;
        for (oi = 0; oi < oldnd; oi++) {
            op *= olddims[oi];
        }
        if (np != op) {
            /* different total sizes; no hope */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldstrides;
            delete[] olddims;
            return -1;
        }
        if (np == 0) {
            /* the current code does not handle 0-sized arrays, so give up */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldstrides;
            delete[] olddims;
            return -1;
        }
        /* oi to oj and ni to nj give the axis ranges currently worked with */
        oi = 0;
        oj = 1;
        ni = 0;
        nj = 1;
        // Greedily group old axes whose combined size matches a group of new
        // axes, checking contiguity within each group.
        while (ni < newShapeRank && oi < oldnd) {
            np = newShape[ni];
            op = olddims[oi];
            while (np != op) {
                if (np < op) {
                    /* Misses trailing 1s, these are handled later */
                    np *= newShape[nj++];
                } else {
                    op *= olddims[oj++];
                }
            }
            /* Check whether the original axes can be combined */
            for (ok = oi; ok < oj - 1; ok++) {
                if (isFOrder) {
                    if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldstrides;
                        delete[] olddims;
                        return -1;
                    }
                } else {
                    /* C order */
                    if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldstrides;
                        delete[] olddims;
                        return -1;
                    }
                }
            }
            /* Calculate new strides for all axes currently worked with */
            if (isFOrder) {
                newStrides[ni] = oldstrides[oi];
                for (nk = ni + 1; nk < nj; nk++) {
                    newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
                }
            } else {
                /* C order */
                newStrides[nj - 1] = oldstrides[oj - 1];
                for (nk = nj - 1; nk > ni; nk--) {
                    newStrides[nk - 1] = newStrides[nk] * newShape[nk];
                }
            }
            ni = nj++;
            oi = oj++;
        }
        /*
         * Set strides corresponding to trailing 1s of the new shape.
         */
        if (ni >= 1) {
            last_stride = newStrides[ni - 1];
        } else {
            last_stride = stride[rank - 1];
        }
        if (isFOrder) {
            if (ni >= 1)
                last_stride *= newShape[ni - 1];
        }
        for (nk = ni; nk < newShapeRank; nk++) {
            newStrides[nk] = last_stride;
        }
        //returns the last element of the new stride array
        int ret = last_stride;
        delete[] newStrides;
        delete[] newShape;
        delete[] oldstrides;
        delete[] olddims;
        return ret;
    }
}
// Overload restricted to a dimension subset: a single dimension has a
// trivial element-wise stride (that axis' stride); anything else is
// unresolvable here and reported as -1.
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
                                                Nd4jLong *dimension, int dimensionLength) {
    return dimensionLength == 1 ? stride[dimension[0]] : -1;
}
/**
 * Builds a full c-order shapeInfo buffer for the given rank and shape.
 * Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape) {
    Nd4jLong *stride = shape::calcStrides(shape, rank);
    traceNew(11);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = stride;
    info->offset = 0;
    info->rank = rank;
    info->order = 'c';
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
    auto result = shape::toShapeBuffer(info);
    // The packed buffer owns nothing; free the temporaries.
    delete[] stride;
    delete info;
    return result;
}
/**
 * This is special method, it returns ONLY 2D shapebuffer.
 * Fills the caller-provided buffer instead of allocating.
 *
 * This method is used only for SoftMax
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer) {
    Nd4jLong strides[MAX_RANK];
    shape::calcStrides(shape, rank, strides);
    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = strides;
    info.offset = 0;
    info.rank = rank;
    info.order = 'c';
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    shape::toShapeBuffer(&info, buffer);
    return buffer;
}
/**
 * Builds a full f-order (Fortran) shapeInfo buffer for the given rank and
 * shape. Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape) {
    auto stride = shape::calcStridesFortran(shape, rank);
    traceNew(12);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = stride;
    info->offset = 0;
    info->rank = rank;
    info->order = 'f';
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
    auto result = shape::toShapeBuffer(info);
    // The packed buffer owns nothing; free the temporaries.
    delete[] stride;
    delete info;
    return result;
}
// f-order variant writing into the caller-provided output buffer.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output) {
    Nd4jLong strides[MAX_RANK];
    shape::calcStridesFortran(shape, rank, strides);
    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = strides;
    info.offset = 0;
    info.rank = rank;
    info.order = 'f';
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    shape::toShapeBuffer(&info, output);
    return output;
}
/**
 * Compute the real linear indices for the given shape and stride:
 * a length-sized table mapping each logical position to its memory offset.
 * Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
    Nd4jLong length = shape::prodLong(shape,rank);
    traceNew(13);
    Nd4jLong *ret = new Nd4jLong[length];
    // 64-bit loop counter: length is Nd4jLong and the original `int i`
    // could overflow/never terminate for arrays larger than INT_MAX.
    for (Nd4jLong i = 0; i < length; i++) {
        Nd4jLong *idx = shape::ind2sub(rank, shape, i);
        ret[i] = shape::getOffset(0, shape, stride, idx, rank);
        delete[] idx;
    }
    return ret;
}
/**
 * Compute the real linear indices for the given shape buffer; rank, shape
 * and stride are all derived from the buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
    auto r = shape::rank(shapeBuffer);
    return computeIndices(r, shape::shapeOf(shapeBuffer), shape::stride(shapeBuffer));
}
/**
 * Convert the given nd index (such as {1,1}) to a linear index using
 * column-major accumulation (leftmost axis fastest).
 * @param shape the shape of the indexes to convert
 * @param indices the index to convert
 * @return the linear index given the shape and indices
 */
INLINEDEF _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices) {
    // Accumulate in 64 bits: the running `shift` product and the partial sums
    // overflowed the original's int accumulators for large shapes; the
    // declared int return type is kept for interface compatibility.
    Nd4jLong index = 0;
    Nd4jLong shift = 1;
    for (int i = 0; i < rank; i++) {
        index += shift * indices[i];
        shift *= shape[i];
    }
    return (int) index;
}
// Fills buffer[0 .. length) with value.
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
#pragma omp simd
    // 64-bit loop counter: length is Nd4jLong and the original `int e`
    // could overflow for buffers larger than INT_MAX elements.
    for (Nd4jLong e = 0; e < length; e++)
        buffer[e] = value;
}
/**
 * Convert a linear index to the equivalent nd (column-major) coordinates.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @return newly allocated rank-length coordinate array (caller owns)
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2sub(rank, shape, index, numIndices, coords);
    return coords;
}
/**
 * Convert a linear index to the equivalent nd index, deriving the total
 * index count from the shape itself. Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) {
    auto total = shape::prodLong(shape, rank);
    return ind2sub(rank, shape, index, total);
}
/**
 * Convert a linear index to the equivalent nd index (column-major: the
 * divisor shrinks from the rightmost axis down).
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @param ret receives the mapped index along each dimension
 */
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    // 64-bit divisor: numIndices is Nd4jLong and the original `int denom`
    // truncated it for large arrays; ind2subC already used a 64-bit divisor.
    Nd4jLong denom = numIndices;
    for (int i = rank - 1; i >= 0; i--) {
        denom /= shape[i];
        ret[i] = index / denom;
        index %= denom;
    }
}
/**
 * In-place variant deriving the total index count from the shape itself.
 */
INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    auto total = shape::prodLong(shape, rank);
    ind2sub(rank, shape, index, total, out);
}
/**
 * Convert a linear index to the equivalent nd (row-major) coordinates.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @return newly allocated rank-length coordinate array (caller owns)
 */
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2subC(rank, shape, index, numIndices, coords);
    return coords;
}
/**
 * Row-major ind2sub deriving the total index count from the shape itself.
 * Caller owns the returned array.
 */
INLINEDEF _CUDA_HD Nd4jLong *ind2subC(int rank, Nd4jLong *shape, Nd4jLong index) {
    auto total = shape::prodLong(shape, rank);
    return ind2subC(rank, shape, index, total);
}
/**
 * Convert a linear index to the equivalent nd index (row-major: the divisor
 * shrinks from the leftmost axis forward). Axes whose divisor reaches zero
 * get coordinate 0.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @param ret receives the mapped index along each dimension
 */
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    Nd4jLong denom = numIndices;
    for (int i = 0; i < rank; i++) {
        denom /= shape[i];
        if (denom <= 0) {
            ret[i] = 0;
            continue;
        }
        ret[i] = index / denom;
        index %= denom;
    }
}
/**
 * In-place row-major variant deriving the total index count from the shape.
 */
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    auto total = shape::prodLong(shape, rank);
    ind2subC(rank, shape, index, total, out);
}
/**
 * Convert a linear index to the equivalent nd index, dispatching on the
 * order flag stored in shapeInfo ('f' -> column-major, otherwise row-major).
 * @param shapeInfo the full shape information buffer
 * @param index the index to map
 * @param numIndices the number of total indices (typically product of shape)
 * @param out receives the mapped index along each dimension
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) {
    const int r = shape::rank(shapeInfo);
    Nd4jLong *shp = shape::shapeOf(shapeInfo);
    if (shape::order(shapeInfo) == 'f')
        shape::ind2sub(r, shp, index, numIndices, out);
    else
        shape::ind2subC(r, shp, index, numIndices, out);
}
/**
 * Order-aware ind2sub deriving the total index count from shapeInfo.
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) {
    ind2subOrder(shapeInfo, index, shape::length(shapeInfo), out);
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
/**
 * Returns a newly allocated copy of shape permuted by rearrange:
 * result[i] = shape[rearrange[i]]. Caller owns the result.
 * @param length number of entries
 * @param shape source array
 * @param rearrange permutation indexes
 */
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
    traceNew(16);
    Nd4jLong *permuted = new Nd4jLong[length];
    for (int i = 0; i < length; i++)
        permuted[i] = shape[rearrange[i]];
    return permuted;
}
/**
 * Permutes *shape in place by rearrange: (*shape)[i] becomes the old
 * (*shape)[rearrange[i]]. No-ops for length 1, shapes with fewer than two
 * elements, or an identity permutation.
 * @param length number of entries
 * @param shape pointer to the array to permute in place
 * @param rearrange permutation indexes (read-only here)
 */
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
    // Nothing to permute for a single dimension.
    if (length == 1)
        return;
    Nd4jLong *shapeDeref = *shape;
    // Shapes with fewer than 2 elements are permutation-invariant.
    if (shape::prodLong(shapeDeref, length) < 2)
        return;
    // Identity permutation (0,1,2,...) — nothing to do.
    bool inOrder = true;
    for (int i = 0; i < length - 1; i++)
        inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
    if (inOrder)
        return;
    // Two out-of-order dimensions can only be reversed: swap directly.
    if (length == 2) {
        auto first = shapeDeref[0];
        shapeDeref[0] = shapeDeref[1];
        shapeDeref[1] = first;
        return;
    }
    // NOTE: the original re-checked length == 1 here; that branch was
    // unreachable because of the early return above and has been removed.
    // General case: gather through a temporary copy.
    auto temp = new Nd4jLong[length];
    memcpy(temp, shapeDeref, sizeof(Nd4jLong) * length);
    for (int i = 0; i < length; i++)
        shapeDeref[i] = temp[rearrange[i]];
    delete[] temp;
}
// Permutes shapeBuffer into out: copies the buffer first when out is a
// distinct buffer, then applies the permutation.
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
    const int bufRank = shape::rank(shapeBuffer);
    if (shapeBuffer != out)
        memcpy(out, shapeBuffer, sizeof(Nd4jLong) * shape::shapeInfoLength(bufRank));
    doPermuteShapeBuffer(bufRank, shapeBuffer, rearrange, out);
}
// Returns a permuted copy of shapeBuffer; caller owns the result.
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
    auto infoLen = shape::shapeInfoLength(shape::rank(shapeBuffer));
    Nd4jLong *permuted = shape::copyOf(infoLen, shapeBuffer);
    doPermuteShapeBuffer(permuted, rearrange);
    return permuted;
}
/**
 * Permutes the shape and stride sections of shapeInfo in place by rearrange,
 * then invalidates the cached element-wise stride (set to -1) and refreshes
 * the order flag. No-ops for <2-element shapes or an identity permutation;
 * prints a diagnostic and bails out on out-of-range rearrange entries.
 */
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {
    const int rank = shape::rank(shapeInfo);
    //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;
    // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
            break;
        }
    if(!isPermutNecessary)
        return;
    // check whether rearrange contains correct indexes
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
    // if everything is ok then perform permute: gather shape (offset 1) and
    // stride (offset 1+rank) entries through a temporary copy
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1] = temp[rearrange[i] + 1];
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
    }
    // element-wise stride is no longer valid; order must be recomputed
    shapeInfo[shapeInfoLength(rank) - 2] = -1;
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);
    delete[] temp;
}
/**
 * int* overload of doPermuteShapeInfo; identical logic to the Nd4jLong*
 * variant above (permute shape/stride in place, invalidate the cached
 * element-wise stride, refresh the order flag).
 */
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {
    const int rank = shape::rank(shapeInfo);
    //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;
    // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
            break;
        }
    if(!isPermutNecessary)
        return;
    // check whether rearrange contains correct indexes
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
    // if everything is ok then perform permute through a temporary copy
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1] = temp[rearrange[i] + 1];
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
    }
    // element-wise stride is no longer valid; order must be recomputed
    shapeInfo[shapeInfoLength(rank) - 2] = -1;
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);
    delete[] temp;
}
/**
 * Permutes the shape and stride sections of shapeBuffer in place, then
 * invalidates the cached element-wise stride and refreshes the order flag.
 * Scalars are left untouched.
 */
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
    //no swapping needs to happen
    if(shape::isScalar(shapeBuffer)) {
        return;
    }
    Nd4jLong *shapeRef = shapeBuffer;
    //rank of the rearrange array == rank of shape buffer
    int rearrageRank = shape::rank(shapeRef);
    Nd4jLong *shape = shape::shapeOf(shapeRef);
    Nd4jLong *stride = shape::stride(shapeRef);
    // permute the shape and stride sections in tandem
    shape::doPermuteSwap(rearrageRank,&shape,rearrange);
    shape::doPermuteSwap(rearrageRank,&stride,rearrange);
    // invalidate cached element-wise stride, then recompute the order flag
    shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
    shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
    // doPermuteShapeBuffer(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large
}
/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
auto shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/
/**
 * Permutes the shape and stride sections of shapeBuffer in place using an
 * explicit rank, then invalidates the cached element-wise stride and
 * refreshes the order flag.
 */
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *stride = shape::stride(shapeBuffer);
    // doPermuteSwap only reads rearrange, so one defensive copy serves both
    // swaps (the original allocated and freed a separate copy per call).
    auto rearrangeCopy = shape::copyOf(rank, rearrange);
    shape::doPermuteSwap(rank, &shape, rearrangeCopy);
    shape::doPermuteSwap(rank, &stride, rearrangeCopy);
    delete[] rearrangeCopy;
    // recompute the order flag and invalidate the cached element-wise stride
    shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape, stride, 1);
    shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1;
}
// Permutes shapeBuffer's shape/stride in place; tmpBuffer receives a copy
// of the original buffer when it is a distinct buffer.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
    auto shp = shape::shapeOf(shapeBuffer);
    auto strd = shape::stride(shapeBuffer);
    if (shapeBuffer != tmpBuffer)
        shape::copyOf(rank, shapeBuffer, tmpBuffer);
    shape::doPermuteSwap(rank, &shp, rearrange);
    shape::doPermuteSwap(rank, &strd, rearrange);
    // invalidate cached element-wise stride and refresh the order flag
    shapeBuffer[shapeInfoLength(rank) - 2] = -1;
    shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shp, strd, 1);
}
// Builds a permutation of length originalRank: the first
// (originalRank - dimensionLength) entries are i + dimensionLength, the
// remaining entries wrap around to 0, 1, ... Caller owns the result.
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    int delta = originalRank - dimensionLength;
    traceNew(17);
    Nd4jLong *indexes = new Nd4jLong[originalRank];
    for (int i = 0; i < originalRank; i++)
        indexes[i] = (i < delta) ? i + dimensionLength : i - delta;
    return indexes;
}
/**
 * Determines the memory ordering of an array from its shape and strides:
 * 'a' when both C- and Fortran-contiguous, 'f' when only Fortran-contiguous,
 * 'c' otherwise.
 * @param length number of dimensions
 * @param shape array extents
 * @param stride array strides
 * @param elementStride base stride for the Fortran check
 */
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
    int cContiguous = 1;
    int isFortran = 1;
    // C-contiguity: walking from the last axis backwards, each stride must
    // equal the running product of the trailing extents.
    int sd = 1;
    for (int i = length - 1; i >= 0; --i) {
        int dim = shape[i];
        if (stride[i] != sd) {
            cContiguous = 0;
            break;
        }
        /* contiguous, if it got this far */
        if (dim == 0)
            break;
        sd *= dim;
    }
    /* check if fortran contiguous: same walk from the first axis forward */
    sd = elementStride;
    for (int i = 0; i < length; ++i) {
        int dim = shape[i];
        if (stride[i] != sd)
            isFortran = 0;
        if (dim == 0)
            break;
        sd *= dim;
    }
    // The original ended with a redundant four-way chain whose last two
    // branches both returned 'c'; condensed here with identical results.
    if (isFortran && cContiguous)
        return 'a';
    if (isFortran)
        return 'f';
    return 'c';
}
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
/**
 * Validates that `arr` is a proper permutation of [0, arrLength):
 * correct length, all entries in range, no duplicates.
 * @return 1 when valid, -1 otherwise.
 */
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
    // a permutation must have exactly one entry per axis
    if (arrLength != shapeLength)
        return -1;
    // every entry must be a valid 0-based axis index
    for (int i = 0; i < arrLength; i++) {
        if (arr[i] < 0 || arr[i] >= arrLength)
            return -1;
    }
    // no entry may appear twice (j > i covers every unordered pair)
    for (int i = 0; i < arrLength; i++) {
        for (int j = i + 1; j < arrLength; j++) {
            if (arr[i] == arr[j])
                return -1;
        }
    }
    return 1;
}
// Debugging hook tagging heap allocations with a numeric site id.
// Intentionally a no-op; uncomment the printf to trace `new` calls.
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
// Permutes the shape and stride of *info in place and refreshes its order flag.
// NOTE(review): the checkArrangeArray result is discarded — an invalid
// permutation (duplicates / out-of-range entries) still proceeds to the swaps.
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
ShapeInformation *infoDeref = *info;
checkArrangeArray(rearrange, rank, rank);
shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
// recompute 'c'/'f'/'a' for the rearranged layout
char order = getOrder(rank,
infoDeref->shape,
infoDeref->stride,
infoDeref->elementWiseStride);
infoDeref->order = order;
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
/**
 * Returns 1 when (shape, rank) describes a vector:
 * rank 1, or rank 2 with at least one unit dimension.
 */
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
    switch (rank) {
        case 0:
            return 0;
        case 1:
            return 1;
        case 2:
            return (shape[0] == 1 || shape[1] == 1) ? 1 : 0;
        default:
            return 0;
    }
}
/**
 * True when the array behaves like a vector despite rank > 2: exactly one
 * dimension has extent != 1. On return posOfNonUnityDim holds that axis
 * (0-based); it is the last non-unit axis when several exist.
 */
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    int nonUnityCount = 0;
    const int rank = static_cast<int>(shapeInfo[0]);
    for (int axis = 1; axis <= rank; ++axis) {
        if (shapeInfo[axis] != 1) {
            ++nonUnityCount;
            posOfNonUnityDim = axis - 1;
        }
    }
    return nonUnityCount == 1 && rank > 2;
}
// Deep-copies a full shape-info buffer so the caller owns an independent
// descriptor (delete[] when done).
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
    auto detached = new Nd4jLong[shape::shapeInfoLength(originalShape)];
    memcpy(detached, originalShape, shape::shapeInfoByteLength(originalShape));
    return detached;
}
// Returns a heap-allocated duplicate of the shape-info buffer.
// Caller owns the result (delete[]).
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
    auto duplicate = new Nd4jLong[shape::shapeInfoLength(originalShape)];
    memcpy(duplicate, originalShape, shape::shapeInfoByteLength(originalShape));
    return duplicate;
}
// Shape-info overload: unpack shape and rank, then delegate.
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(shapeInfo);
    auto r = shape::rank(shapeInfo);
    return isVector(extents, r);
}
// A row vector is a vector whose first dimension has extent 1.
INLINEDEF _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(shapeInfo)[0] == 1;
}
// A column vector is a vector whose first dimension has extent != 1.
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(shapeInfo)[0] != 1;
}
/**
 * Returns 1 when some single dimension's extent equals the total element
 * count (i.e. all other dimensions are 1), 0 otherwise.
 * Fix: the original recomputed shape::prod on every iteration (O(rank^2));
 * the product is loop-invariant, so it is hoisted out.
 */
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
    const auto totalLength = shape::prod(shape, rank);
    for (int i = 0; i < rank; i++) {
        if (shape[i] == totalLength)
            return 1;
    }
    return 0;
}
// Shape-info overload: unpack the extents and rank, then delegate.
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(shapeInfo);
    return oneDimEqualToLength(extents, shape::rank(shapeInfo));
}
/**
 * Returns whether the
 * given shape is a matrix or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
/**
 * Returns 1 when (shape, rank) describes a proper matrix: exactly rank 2
 * with no unit dimension (a unit dimension makes it a vector instead).
 * Fix: the original read shape[1] for rank < 2, which is out of bounds for
 * rank-0/rank-1 shape arrays; a matrix requires rank == 2, so guard first.
 */
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
    if (rank != 2)
        return 0; // also prevents the rank<2 out-of-bounds read of shape[1]
    if (shape[0] == 1 || shape[1] == 1)
        return 0;
    return 1;
}
// Shape-info overload: unpack extents and rank, then delegate.
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(shapeInfo);
    return isMatrix(extents, shape::rank(shapeInfo));
}
/**
* Returns the shape portion of an information
* buffer
*/
// Layout is [rank, shape..., stride..., offset, ews, order];
// the shape words start right after the rank word.
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
    return &buffer[1];
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
/**
 * Returns a heap-allocated copy of the first `length` elements of toCopy.
 * Caller owns the result (delete[]).
 */
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
    traceNew(18);
    auto destination = new T[length];
    return copyOf(length, toCopy, destination);
}
// Copies `length` elements into the caller-provided buffer and returns it.
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
    memcpy(ret, toCopy, length * sizeof(T));
    return ret;
}
/**
 * Copy `length` elements from one buffer to another.
 * No memory is allocated; the destination must
 * already be at least `length` elements long.
 */
// Straight element copy: to[0..length) = from[0..length).
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
    memcpy(to, from, length * sizeof(T));
}
/**
 * Gather-copy: to[i] = from[indexes[i]] for i in [0, length).
 * No memory is allocated; the destination must
 * already be at least `length` elements long.
 */
// Gather: to[i] = from[indexes[i]].
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for (int i = 0; i < length; ++i)
        to[i] = from[indexes[i]];
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
// A slice drops the leading dimension: step one element past shape[0].
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
    return &shape[1];
}
// Number of slices equals the extent of the first dimension.
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
    return (int) shape::shapeOf(shapeBuffer)[0];
}
// Builds the shape descriptor of slice `sliceIdx` (first dimension fixed) of
// the array described by shapeBuffer. Returns a newly allocated shape-info
// buffer the caller must delete[]; for a column vector the slice collapses
// to a scalar descriptor instead.
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
// a slice drops one dimension, but descriptors are kept at rank >= 2
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
// NOTE(review): shadows the outer currShape with the same value
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
// NOTE(review): row vector with sliceIdx != 0 falls through below with
// newShape/newStride still uninitialized — confirm this path is unreachable.
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
// park the element's position in the scalar descriptor's offset slot
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
// matrix slice: a 1 x cols row view keeping the column stride
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
// general case: drop the leading dimension of shape and stride
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
// NOTE(review): getOffset is invoked with the OLD rank against the new
// shape/stride arrays of length newRank — confirm rank == newRank here.
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
if(shape::isMatrix(shapeBuffer)) {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
}
else {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
}
newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
delete[] indices;
return newShapeBuffer;
}
/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
// Layout: 1 rank word + rank shape words + rank stride words
//         + offset + elementWiseStride + order  ->  2*rank + 4 words.
return rank * 2 + 4;
}
// shape[0] holds the rank; narrow explicitly to match the int overload,
// consistent with shapeInfoByteLength(Nd4jLong*) below (the original relied
// on an implicit Nd4jLong -> int conversion).
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(shape[0]));
}
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
// byte size of a full shape-info buffer: (2*rank + 4) Nd4jLong words
return (rank * 2 + 4) * sizeof(Nd4jLong);
}
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo) {
// shapeInfo[0] holds the rank; delegate to the int overload
return shapeInfoByteLength((int) shapeInfo[0]);
}
/**
* Returns the rank portion of
* an information buffer
*/
// The rank is stored in the first word of the shape-info buffer.
INLINEDEF _CUDA_HD int rank( Nd4jLong *buffer) {
    return (int) buffer[0];
}
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
/**
 * Deserializes a raw shape-info buffer
 * (layout: rank, shape..., stride..., offset, elementWiseStride, order)
 * into a heap-allocated ShapeInformation view. The shape/stride pointers
 * alias `buffer`; the caller owns only the struct itself (delete it).
 * Fix: the original assigned info->stride twice through two equivalent
 * expressions; the dead duplicate is removed.
 */
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);
    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];
    // shape words start right after the rank; strides follow the shape words
    info->shape = buffer + 1;
    info->stride = buffer + (1 + rank);
    info->rank = rank;
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}
/**
* Returns the stride portion of an information
* buffer
*/
// Strides follow the rank word and the `rank` shape words.
INLINEDEF _CUDA_HD Nd4jLong *stride( Nd4jLong *buffer) {
    return &buffer[1 + rank(buffer)];
}
// True when the ARRAY_EMPTY flag bits are set in the descriptor's extra word
// (an array with zero elements, distinct from a scalar).
INLINEDEF _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo) {
return ((shape::extra(shapeInfo) & ARRAY_EMPTY) == ARRAY_EMPTY);
}
/**
* Compute the length of the given shape
*/
// Total element count described by the shape-info buffer.
// Rank 0 is a scalar (1 element) unless flagged empty; rank 1 reads the
// single extent directly; otherwise the product of all extents.
INLINEDEF _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo) {
    const int r = shape::rank(shapeInfo);
    if (r == 0)
        return isEmpty(shapeInfo) ? 0L : 1L;
    if (r == 1)
        return shapeInfo[1];
    return shape::prodLong(shape::shapeOf(shapeInfo), r);
}
// Product of all extents in the list (1 for an empty list).
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
// Product of all extents in the list (1 for an empty list).
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
/***
* Returns the offset
* portion of an information buffer
*/
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
// the offset word sits 3 slots from the end: [..., offset, ews, order]
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
// Mutable reference to the descriptor's extra/flags word.
// NOTE(review): this reads the same slot (length - 3) as shape::offset();
// presumably the legacy offset field was repurposed for flags — confirm.
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
* Returns the ordering
* for this shape information buffer
*/
INLINEDEF _CUDA_HD char order(Nd4jLong *buffer) {
// order is the last word of the buffer: index (2*rank + 4) - 1
return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 1]);
}
/**
* Returns the element wise stride for this information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer) {
// ews is the second-to-last word of the shape-info buffer
return buffer[shapeInfoLength(buffer[0]) - 2];
}
/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/
// Picks the stride to iterate with when reducing along `dimension`.
// Branches on the buffer's order flag and on whether multiple reduce
// dimensions were given; unit-extent reduce dimensions collapse to stride 1.
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
if(dimensionLength > 1) {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
//int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
//return tadElementWiseStride;
// f-order: use the stride of the FIRST reduce dimension
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
return 1;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
// c-order: use the stride of the LAST reduce dimension
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
return 1;
}
}
else {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
}
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
// Scalar test on a shape-info buffer: rank 0, or a shape of all-1 extents
// at rank 1/2. Anything above rank 2 is never a scalar.
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    const int r = shape::rank(info);
    if (r == 0)
        return 1;
    if (r == 1)
        return shape::shapeOf(info)[0] == 1;
    if (r == 2) {
        auto extents = shape::shapeOf(info);
        return extents[0] == 1 && extents[1] == 1;
    }
    return 0;
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
/**
 * Scalar test on a ShapeInformation struct.
 * Fix: the original omitted the rank == 0 case and returned 0 for it,
 * inconsistent with the isScalar(Nd4jLong*) overload above which treats
 * rank 0 as a scalar; the two overloads now agree.
 */
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
    const int rank = info->rank;
    if(rank > 2)
        return 0;
    if(rank == 0)
        return 1;   // consistent with isScalar(Nd4jLong*)
    if(rank == 1)
        return info->shape[0] == 1;
    if(rank == 2)
        return info->shape[0] == 1 && info->shape[1] == 1;
    return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
 * Copies into `ret` every element of `data` whose position does not appear
 * in `indexes`. Writes at most dataLength - indexesLength elements; `ret`
 * must be at least that large.
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    const int resultLength = dataLength - indexesLength;
    int kept = 0;
    for (int i = 0; i < dataLength; i++) {
        if (kept >= resultLength)
            break; // result is full
        bool skip = false;
        for (int j = 0; j < indexesLength; j++) {
            if (indexes[j] == i) {
                skip = true;
                break;
            }
        }
        if (!skip)
            ret[kept++] = data[i];
    }
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
 * Allocating variant: returns a new zero-initialized array of length
 * dataLength - indexesLength holding the kept elements; caller delete[]s.
 * Fixes: the warning fired only for < 0 although its message says "<= 0",
 * and a negative length was then passed straight to new[] (undefined
 * behavior). The condition now matches the message and a negative length
 * is clamped to 0 before allocation.
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr <= 0) {
        printf("Remove index call created a <= 0 length array. This was likely not intended.");
        if (lengthOfArr < 0)
            lengthOfArr = 0;   // never hand a negative size to new[]
    }
    auto ret = new T1[lengthOfArr];
    memset(ret, 0, sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}
// Returns the indices in [begin, end) that are NOT listed in `indexes`.
// Caller owns the result (delete[]).
// NOTE(review): len = end - indexesLength ignores `begin`; the allocation is
// only correct when begin == 0 (or over-allocates) — confirm callers.
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
int len = end - indexesLength;
traceNew(20);
auto ret = new Nd4jLong[len];
int retIdx = 0;
//note here that we do 0 based indexing for end - this assumes things like:
//0 to 4 are specified
for(int i = begin; i < end ; i++) {
bool found = false;
for(int j = 0; j < indexesLength; j++) {
if(indexes[j] == i) {
found = true;
break;
}
}
if(!found) {
ret[retIdx++] = i;
}
}
return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
// Device-only: each thread's tad offset is the base offset advanced by its
// thread index times the element-wise stride.
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
/**
 * Promotes a 1-d extent to a rank-2 vector shape: dimension 0 yields a row
 * vector [1, n], anything else a column vector [n, 1].
 * Caller owns the returned 2-element array (delete[]).
 */
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
    traceNew(21);
    auto ret = new Nd4jLong[2];
    const bool rowVector = (dimension == 0);
    ret[0] = rowVector ? 1 : shape[0];
    ret[1] = rowVector ? shape[0] : 1;
    return ret;
}
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
// default to a row vector (dimension 0); caller owns the result
return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
/**
 * STRICT shape-buffer equality: every word (rank, shapes, strides, offset,
 * ews, order) must match. Two rank-0 buffers always compare equal.
 */
INLINEDEF _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    // compare the full descriptor word by word
    const int words = shape::shapeInfoLength(shapeA[0]);
    for (int e = 1; e < words; e++) {
        if (shapeA[e] != shapeB[e])
            return false;
    }
    return true;
}
// Extent along `dim`; negative dims count back from the last axis
// (numpy-style), e.g. -1 is the last dimension.
INLINEDEF _CUDA_HD int sizeAt(Nd4jLong *shape, int dim) {
    const int axis = (dim >= 0) ? dim : rank(shape) + dim;
    return shape[1 + axis];
}
/**
* This method does SOFT comparison for two shape buffers, we compare only rank & shapes
*
* @param shape
* @return
*/
/**
 * SOFT shape-buffer equality: only rank and extents must match;
 * strides, ews and order are ignored.
 */
INLINEDEF _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true; // two rank-0 descriptors always match
    const auto r = shapeA[0];
    for (Nd4jLong e = 1; e <= r; e++) {
        if (shapeA[e] != shapeB[e])
            return false;
    }
    return true;
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
/**
 * Generates the values covered by [from, to) at the given increment into a
 * newly allocated array (caller delete[]s).
 * NOTE(review): the descending branch starts at from - 1 (not from), and the
 * result length is truncated to diff / increment — verify callers depend on
 * this exact fencepost behavior before changing it.
 */
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
int diff = nd4j::math::nd4j_abs<int>(from - to);
int retLength = diff / increment;
T *ret;
traceNew(22);
// always allocate at least one element
if(diff / increment < 1)
ret = new T[1];
else
ret = new T[diff / increment];
if (from < to) {
int count = 0;
for (int i = from; i < to; i += increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
} else if (from > to) {
int count = 0;
for (int i = from - 1; i >= to; i -= increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
}
return ret;
}
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
// unit-increment convenience overload; caller owns the result
return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
/**
 * Copies only the positions of `data` listed in `index` into a new array of
 * indexLength elements, preserving data order. Caller delete[]s the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
    traceNew(23);
    auto ret = new Nd4jLong[indexLength];
    int out = 0;
    for (int i = 0; i < dataLength; i++) {
        bool wanted = false;
        for (int j = 0; j < indexLength; j++) {
            if (index[j] == i) {
                wanted = true;
                break;
            }
        }
        if (wanted)
            ret[out++] = data[i];
    }
    return ret;
}
/**
* Generate a reverse
* copy of the data
*/
/**
 * Returns a newly allocated reversed copy of `data`
 * (nullptr for empty input). Caller delete[]s the result.
 */
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
    if (length < 1)
        return nullptr;
    traceNew(24);
    T *result = new T[length];
    // fill from both ends toward the middle
    for (Nd4jLong front = 0; front <= length / 2; front++) {
        const Nd4jLong back = length - front - 1;
        T saved = data[front];
        result[front] = data[back];
        result[back] = saved;
    }
    return result;
}
// Writes the reverse of `from` into the caller-provided `to`
// (no-op for empty input).
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong front = 0; front <= length / 2; front++) {
        const Nd4jLong back = length - front - 1;
        T saved = from[front];
        to[front] = from[back];
        to[back] = saved;
    }
}
// Reversed gather into `to`: to[i] = from[indexes[length - 1 - i]]
// (no-op for empty input).
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong front = 0; front <= length / 2; front++) {
        const Nd4jLong back = length - front - 1;
        T saved = from[indexes[front]];
        to[front] = from[indexes[back]];
        to[back] = saved;
    }
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
/**
 * Concatenates two arrays into a newly allocated array of
 * arr1Length + arr2Length elements. Caller delete[]s the result.
 */
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
    traceNew(25);
    auto joined = new T[arr1Length + arr2Length];
    std::memcpy(joined, arr1, arr1Length * sizeof(T));
    std::memcpy(joined + arr1Length, arr2, arr2Length * sizeof(T));
    return joined;
}
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
/**
 * Concatenates numArrays arrays (lengths given per array) into one new
 * buffer of numTotalElements elements. Caller delete[]s the result.
 */
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    auto merged = new T[numTotalElements];
    Nd4jLong pos = 0;
    for (Nd4jLong i = 0; i < numArrays; i++)
        for (Nd4jLong j = 0; j < lengths[i]; j++)
            merged[pos++] = arr[i][j];
    return merged;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
// Number of elements per slice when reducing `shape` along `dimension`:
// the product of the extents NOT named in `dimension`, with shortcuts for
// row vectors and full reductions.
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
if(shape::isVector(shape,rank)) {
//return total length for row vectors
if(dimensionLength == 1 && shape[0] == 1) {
return shape::prod(shape,rank);
}
// NOTE(review): other vector cases intentionally fall through to the
// general path below — confirm before restructuring.
}
else if(rank == dimensionLength)
return shape::prod(shape,rank);
// general case: multiply the extents that survive the reduction
int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
traceNew(27);
auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
auto ret = prodLong(ret2, absSelta);
delete[] ret2;
return ret;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
/**
 * Offset of tensor `index` within its parent: index * tensorLength divided
 * by the per-slice element count. Returns 0 when the slice length is
 * non-positive (guards the division).
 */
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
    auto tensorLength = prodLong(tensorShape, tensorShapeLength);
    auto perSlice = lengthPerSlice(rank, shape, dimension, dimensionLength);
    if (perSlice <= 0)
        return 0;
    return index * tensorLength / perSlice;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
// Precomputed-lengths overload: index * tensorLength / lengthPerSlice2.
// No guard here — the caller must ensure lengthPerSlice2 != 0.
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
    return index * tensorLength / lengthPerSlice2;
}
#ifdef __CUDACC__
/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
// Device-only: base offset advanced by the calling thread's index times the
// descriptor's element-wise stride.
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
/**
 * Number of tensors (tads) obtained by fixing every dimension except those
 * in `dimension`: total length divided by the tad's element count.
 */
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
                                                  volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
    Nd4jLong *tadShape = shape::keep(shape, dimension, dimensionLength, rank);
    const Nd4jLong count = length / shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return count;
}
/**
* Computes the number
* of tensors along
* a given dimension
*/
/**
 * Shape-info overload: tads along `dimension` = total element count
 * divided by the tad's element count.
 */
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    Nd4jLong *extents = shape::shapeOf(shapeInfo);
    Nd4jLong *tadShape = shape::keep(extents, dimension, dimensionLength, rank(shapeInfo));
    const Nd4jLong count = shape::length(shapeInfo) / shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return count;
}
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
// Linear buffer offset of the element addressed by `indices`:
// baseOffset + sum(indices[i] * stride[i]) over non-unit dimensions.
// Returns -1 (after printing a diagnostic) if any index is out of range.
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices, int rank) {
Nd4jLong offset = baseOffset;
for(int i = 0; i < rank; i++) {
// out-of-range index (unit dimensions excepted): report and bail out
if(indices[i] >= shape[i] && shape[i] != 1) {
#ifdef __CUDA_ARCH__
printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]);
#else
printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]);
#endif
#ifdef __CUDA_ARCH__
if (threadIdx.x == 0 && blockIdx.x == 0)
printShapeInfoLinear("getOffsetFailed", rank, shape, stride);
#endif
return -1;
}
// unit dimensions contribute nothing to the offset
if(shape[i] != 1) {
offset += indices[i] * stride[i];
}
}
return offset;
}
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
// The i-th tad handled by a block: stride blockSize starting at blockIdx.
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
    return i * blockSize + blockIdx;
}
/**
* Computes the number of tads per block
*
*/
// Ceiling division: how many tads each block must process.
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
    const double perBlock = tads / (double) blockSize;
    return (int) nd4j::math::nd4j_ceil<double>(perBlock);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
/**
 * Serializes a ShapeInformation struct into a newly allocated flat buffer
 * laid out as [rank, shape..., stride..., offset, ews, order].
 * Caller owns the result (delete[]).
 */
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
    traceNew(29);
    const int rank = info->rank;
    auto ret = new Nd4jLong[shapeInfoLength(rank)];
    ret[0] = rank;
    int pos = 1;
    for (int i = 0; i < rank; i++)
        ret[pos++] = info->shape[i];
    for (int i = 0; i < rank; i++)
        ret[pos++] = info->stride[i];
    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos] = info->order;
    return ret;
}
/**
 * Serializes a ShapeInformation struct into the caller-provided buffer
 * ([rank, shape..., stride..., offset, ews, order]) and returns it.
 * Rank 0 writes the canonical scalar descriptor [0, 0, 1, 99].
 */
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
    const int rank = info->rank;
    ret[0] = rank;
    if (rank == 0) {
        ret[1] = 0;
        ret[2] = 1;
        ret[3] = 99;
        return ret;
    }
    int pos = 1;
    for (int i = 0; i < rank; i++)
        ret[pos++] = info->shape[i];
    for (int i = 0; i < rank; i++)
        ret[pos++] = info->stride[i];
    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos++] = info->order;
    return ret;
}
// Prints the array space-separated on one line, then a newline.
INLINEDEF _CUDA_HD void printIntArray(Nd4jLong *arr,int length) {
    for (int i = 0; i < length; i++)
        printf(" %lld ", (long long) arr[i]);
    printf("\n");
}
// Human-readable dump of a shape-info buffer: rank, extents, strides, order.
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
    const int rank = shape::rank(shapeInfo);
    Nd4jLong *shape = shape::shapeOf(shapeInfo);
    printf("Rank %d\n",rank);
    printf("Shape:\n");
    for (int i = 0; i < rank; i++)
        printf(" %lld ", (long long) shape[i]);
    printf("\n");
    Nd4jLong *stride = shape::stride(shapeInfo);
    printf("Stride:\n");
    for (int i = 0; i < rank; i++)
        printf(" %lld ", (long long) stride[i]);
    printf("\n");
    printf("Order %c\n",shape::order(shapeInfo));
}
// Dumps the raw shape-info words as "ShapeInfo: [a, b, c]".
INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) {
    const int lim = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("ShapeInfo: [");
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i + 1 < lim)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
// Prints "<msg> : [s0, s1, ..., t0, t1]" — shape extents followed by strides.
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) {
    printf("%s : [", msg);
    for (int i = 0; i < rank; i++)
        printf("%lld, ", (long long) shape[i]);
    for (int i = 0; i < rank; i++) {
        printf("%lld", (long long) strides[i]);
        if (i + 1 < rank)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
// Dumps the raw shape-info words prefixed by a caller-supplied message.
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) {
    const int lim = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("%s : [", msg);
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i + 1 < lim)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDACC__
    fflush(stdout);
#endif
}
// Prints a float array as "Array: [a, b, c]".
INLINEDEF _CUDA_HD void printArray(float *arr,int length) {
    printf("Array: [");
    for (int i = 0; i < length; i++) {
        printf("%f", arr[i]);
        if (i + 1 < length)
            printf(", ");
    }
    printf("]\n");
}
/**
 * Given a linear index, element wise stride
 * and the length of each tad
 * map a linear index to a tad
 * @param i the index to map
 * @param elementWiseStride the element wise stride for the tads
 * @param numElementsPerTad the number of elements
 * per tad
 */
// Each tad spans numElementsPerTad * elementWiseStride linear positions.
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
    return i / (elementWiseStride * numElementsPerTad);
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
// Maps an original-problem tad index onto its reduction index; each reduced
// tad covers tadsForOriginal / tadsForReduced original tads.
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                            int tadsForOriginal) {
    if (tadIndexForOriginal == 0)
        return 0;
    const int originalsPerReduced = tadsForOriginal / tadsForReduced;
    return tadIndexForOriginal / originalsPerReduced;
}
/**
 * Transposes a shape descriptor in place: reverses the shape and stride
 * arrays and flips the stored order flag (99 'c' <-> 102 'f').
 * Fix: the original swapped Nd4jLong values through an `int tmp`, silently
 * truncating extents/strides that exceed INT_MAX; the temporaries are now
 * Nd4jLong.
 */
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);
    // reverse shape and strides together, swapping ends toward the middle
    for (int e = 0; e < rank / 2; e++) {
        const int idx1 = rank - e - 1;
        const int idx2 = e;
        Nd4jLong tmpShape = shape[idx2];
        shape[idx2] = shape[idx1];
        shape[idx1] = tmpShape;
        Nd4jLong tmpStride = strides[idx2];
        strides[idx2] = strides[idx1];
        strides[idx1] = tmpStride;
    }
    // flip the order flag: 99 == 'c', 102 == 'f'
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
// position of a linear index within its tad
return linearIndex % tadLength;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
// how many original tads collapse into each reduce index
return tadsForOriginal / tadsForReduce;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
// Linear index -> owning tad -> reduction index, composing the two mappings.
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                               int tadNum, int originalTadNum) {
    const int owningTad = tadIndex(i, elementWiseStride, numElementsPerTad);
    return reductionIndexForTad(owningTad, tadNum, originalTadNum);
}
/**
 * Builds a legacy rank-1 scalar descriptor (shape [1], stride [1], ews 1,
 * order 99 == 'c') via toShapeBuffer. Caller delete[]s the result.
 */
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
    traceNew(30);
    auto scalarShape = new Nd4jLong[1];
    auto scalarStride = new Nd4jLong[1];
    scalarShape[0] = 1;
    scalarStride[0] = 1;
    auto info = new ShapeInformation();
    info->rank = 1;
    info->offset = 0;
    info->stride = scalarStride;
    info->shape = scalarShape;
    info->elementWiseStride = 1;
    info->order = 99; // 'c'
    Nd4jLong *ret = shape::toShapeBuffer(info);
    delete info;
    delete[] scalarShape;
    delete[] scalarStride;
    return ret;
}
// Fills the caller-provided buffer with a rank-2 1x1 scalar descriptor:
// [rank=2, shape=1,1, stride=1,1, offset=0, ews=1, order=99('c')].
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
    const Nd4jLong descriptor[8] = {2, 1, 1, 1, 1, 0, 1, 99};
    for (int i = 0; i < 8; i++)
        ret[i] = descriptor[i];
    return ret;
}
/**
* Returns the prod of the data
* up to the given length
*/
// Product of the first `length` entries in (narrowing) int arithmetic;
// see prodLong for the 64-bit variant.
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    int result = 1;
    for (int i = 0; i < length; i++)
        result *= data[i];
    return result;
}
/**
* Returns the prod of the data
* up to the given length
*/
// Product of the first `length` entries in full Nd4jLong precision.
INLINEDEF _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length) {
    Nd4jLong result = 1;
    for (int i = 0; i < length; i++)
        result *= data[i];
    return result;
}
/**
 * Scanning backwards, returns the stride of the highest-numbered dimension
 * NOT listed in `dimension` (the "left over" axis for tad iteration),
 * falling back to stride[0] when every axis is consumed.
 * Fix: the original duplicated byte-identical loops for 'f' and 'c' order;
 * the branch on shape::order(data) was dead, so the loops are collapsed.
 * Note: the int return narrows a Nd4jLong stride (pre-existing interface).
 */
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    int rank = shape::rank(data);
    // Looping backwards finds the highest dimension left that isn't included
    // in the dimension index list — the last item of the set difference
    // between all axes and the reduce axes, without extra allocations.
    int dimIdx = dimensionLength - 1;
    for (int i = rank - 1; i >= 0; i--) {
        if (dimension[dimIdx--] != i) {
            return stride[i];
        }
    }
    // every axis matched: fall back to the first stride
    return stride[0];
}
#ifdef __CUDACC__
// Block-cooperative copy of a shape-info buffer into targetBuffer: each
// thread copies a blockDim.x-strided subset of the entries. No
// synchronization is performed here; callers must synchronize before
// consuming targetBuffer.
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
// we read first element, to find out length of our shapeInfoBuffer
int rank = shapeInfoBuffer[0];
int len = shape::shapeInfoLength(rank);
for (int i = threadIdx.x; i < len; i += blockDim.x)
targetBuffer[i] = shapeInfoBuffer[i];
}
#endif
// Convenience overload: builds a shape buffer straight from a cnpy array
// descriptor by delegating to the (rank, dims, fortranOrder) overload.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
// Builds a shape-info buffer from an npy header's dimensions.
// Fix: the Fortran-order branch previously reinterpret-cast the
// `unsigned int*` dims directly to `Nd4jLong*`, which reads garbage
// whenever sizeof(Nd4jLong) != sizeof(unsigned int). Both orders now
// widen the 32-bit dims into a temporary Nd4jLong array, exactly like
// the C-order branch always did.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) {
    Nd4jLong *newShape = new Nd4jLong[rank];
    for (int i = 0; i < rank; i++) {
        newShape[i] = shape[i];
    }
    Nd4jLong *shapeBufferRet = fortranOrder
        ? shape::shapeBufferFortran(rank, newShape)
        : shape::shapeBuffer(rank, newShape);
    delete[] newShape;
    return shapeBufferRet;
}
// Returns true when the stride layout is consistent with the buffer's
// order flag: strictly descending strides for 'c', strictly ascending for
// 'f'. Unit-stride row vectors are accepted unconditionally. Prints a
// warning and returns false for any other order byte.
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
Nd4jLong *strides = shape::stride(shapeBuffer);
char order = shape::order(shapeBuffer);
// NOTE(review): this reads strides[1]; for a rank-1 buffer that index is
// past the stride section -- confirm isRowVector() guarantees rank >= 2
// here, otherwise this is an out-of-bounds read.
if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
return true;
if (order == 'c') {
for (int i = 1; i < rank; i++)
if (strides[i-1] <= strides[i])
return false;
return true;
} else if (order == 'f') {
for (int i = 1; i < rank; i++)
if (strides[i-1] >= strides[i])
return false;
return true;
} else {
printf("Unknown order for array!\n");
return false;
}
}
// Attempts a no-copy reshape from (oldRank, oldShape) to
// (newRank, newShapeOf), computing strides for the requested order
// (isFOrder) in the style of NumPy's attempt_nocopy_reshape. On success
// writes a complete shape-info buffer into `target` (shape, strides,
// offset slot 0, ews slot -1, order byte 102 for 'f' / 99 for 'c') and
// returns true. Returns false when the axes cannot be combined without
// copying data, or the element counts differ, or the array is 0-sized.
INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
int oldnd;
Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
int np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
Nd4jLong* newStrides = new Nd4jLong[newRank];
oldnd = 0;
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oi = 0; oi < oldRank; oi++) {
if (shape::shapeOf(oldShape)[oi] != 1) {
olddims[oldnd] = shape::shapeOf(oldShape)[oi];
oldstrides[oldnd] = shape::stride(oldShape)[oi];
oldnd++;
}
}
// total element counts of old and new shapes must agree
np = 1;
for (ni = 0; ni < newRank; ni++) {
np *= newShapeOf[ni];
}
op = 1;
for (oi = 0; oi < oldnd; oi++) {
op *= olddims[oi];
}
if (np != op) {
/* different total sizes; no hope */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
/* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
// Greedily grow the [oi,oj) and [ni,nj) windows until their element
// counts match, then verify contiguity and emit strides for the window.
while (ni < newRank && oi < oldnd) {
np = newShapeOf[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShapeOf[nj++];
} else {
op *= olddims[oj++];
}
}
/* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (isFOrder) {
if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
} else {
/* C order */
if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
}
} else {
/* C order */
newStrides[nj - 1] = oldstrides[oj - 1];
for (nk = nj - 1; nk > ni; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
}
}
ni = nj++;
oi = oj++;
}
// Remaining (trailing size-1) axes get a repeat of the last stride.
if (ni >= 1) {
last_stride = newStrides[ni - 1];
} else {
last_stride = shape::elementWiseStride(oldShape);
}
if (isFOrder && ni >= 1) {
last_stride *= newShapeOf[ni - 1];
}
for (nk = ni; nk < newRank; nk++) {
newStrides[nk] = last_stride;
}
// Serialize rank, shape and strides into the flat target buffer.
target[0] = newRank;
int cnt = 1;
for (int e = 0; e < newRank; e++)
target[cnt++] = newShapeOf[e];
for (int e = 0; e < newRank; e++)
target[cnt++] = newStrides[e];
target[shape::shapeInfoLength(newRank) - 3] = 0;
target[shape::shapeInfoLength(newRank) - 2] = -1;
target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return true;
}
// Pure feasibility check: runs exactly the same axis-combination logic as
// reshapeCF() above, but discards the computed strides and never writes a
// target buffer. Returns true iff a no-copy reshape to
// (newRank, newShapeOf) in the requested order is possible.
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
int oldnd;
Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
int np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
auto newStrides = new Nd4jLong[newRank];
oldnd = 0;
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oi = 0; oi < oldRank; oi++) {
if (shape::shapeOf(oldShape)[oi] != 1) {
olddims[oldnd] = shape::shapeOf(oldShape)[oi];
oldstrides[oldnd] = shape::stride(oldShape)[oi];
oldnd++;
}
}
// total element counts of old and new shapes must agree
np = 1;
for (ni = 0; ni < newRank; ni++) {
np *= newShapeOf[ni];
}
op = 1;
for (oi = 0; oi < oldnd; oi++) {
op *= olddims[oi];
}
if (np != op) {
/* different total sizes; no hope */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
/* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
while (ni < newRank && oi < oldnd) {
np = newShapeOf[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShapeOf[nj++];
} else {
op *= olddims[oj++];
}
}
/* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (isFOrder) {
if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
} else {
/* C order */
if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
}
} else {
/* C order */
newStrides[nj - 1] = oldstrides[oj - 1];
for (nk = nj - 1; nk > ni; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
}
}
ni = nj++;
oi = oj++;
}
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return true;
}
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also it sorts input array of dimensions, this operation is also necessary for creating TAD object
// Validates and canonicalizes a dimension list against an array rank:
// maps negative axes to rank+dim, sorts and de-duplicates (when more than
// one entry), and throws std::runtime_error on empty input, too many
// dimensions, or entries still out of [0, rank-1] after the transform.
// Mutates `dimensions` in place.
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
int dimSize = dimensions.size();
if(dimSize == 0)
throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
// check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim|
for(auto& dim : dimensions)
if(dim < 0)
dim += rank;
// sort input array of dimensions, this operation is also necessary for creating TAD object in external methods
if (dimSize > 1) {
std::sort(dimensions.begin(), dimensions.end());
// remove duplicates if they are present
dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
}
// check whether number of dimensions is to big (>rank)
dimSize = dimensions.size();
if(dimSize > rank)
throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
// check if min dimension is still negative and whether max dimension is bigger then rank-1
if(dimensions[0] < 0 || dimensions.back() > (rank-1))
throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}
// return absolute index of array min, min is sub-array of max, index to be returned is min's index and corresponds to maxIdx of max array
// Maps a flat index of the larger array (maxShapeInfo) to the flat index
// of a broadcast-compatible sub-array (minShapeInfo): decomposes maxIdx
// into per-axis coordinates, aligns shapes from the trailing axes, wraps
// coordinates that exceed the min extent, and accumulates with min's
// strides. Unit axes of min contribute nothing.
INLINEDEF _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx) {
const int rankMax = maxShapeInfo[0];
const int rankMin = minShapeInfo[0];
auto* idxPerRank = new Nd4jLong[rankMax];
// decompose maxIdx into per-axis coordinates (C order)
ind2subC(rankMax, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<int&>(maxIdx), idxPerRank);
Nd4jLong minIdx = 0;
for(int i = 0; i < rankMin; ++i) {
if(minShapeInfo[rankMin - i] == 1 || idxPerRank[rankMax - i - 1] == 0)
continue;
// broadcast wrap-around when the coordinate exceeds min's extent
if(idxPerRank[rankMax - i - 1] >= minShapeInfo[rankMin - i])
idxPerRank[rankMax - i - 1] %= minShapeInfo[rankMin - i];
minIdx += idxPerRank[rankMax - i - 1] * stride(const_cast<Nd4jLong*>(minShapeInfo))[rankMin - i - 1];
}
delete[] idxPerRank;
return minIdx;
}
/**
 * Writes rank-0 (true scalar) shape info into buffer:
 * {rank=0, 0, ews=1, order=99 ('c')}.
 */
INLINEDEF _CUDA_HD void shapeScalar(Nd4jLong* const buffer) {
    const Nd4jLong vals[4] = {0, 0, 1, 99};
    for (int i = 0; i < 4; i++)
        buffer[i] = vals[i];
}
/**
 * Writes legacy scalar shape info (rank 2, 1x1, unit strides, offset 0,
 * ews 1) into buffer, with the given order byte in the last slot.
 */
INLINEDEF _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order) {
    const Nd4jLong vals[8] = {2, 1, 1, 1, 1, 0, 1, (Nd4jLong)(int)order};
    for (int i = 0; i < 8; i++)
        buffer[i] = vals[i];
}
/**
 * Writes rank-1 vector shape info into buffer:
 * {rank=1, shape={length}, stride={1}, offset=0, ews=1, order=99 ('c')}.
 */
INLINEDEF _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer) {
    Nd4jLong *p = buffer;
    *p++ = 1;       // rank
    *p++ = length;  // shape[0]
    *p++ = 1;       // stride[0]
    *p++ = 0;       // offset
    *p++ = 1;       // element-wise stride
    *p   = 99;      // order 'c'
}
/**
 * Element-wise conversion of `length` values from T1 into T2,
 * writing the cast results into `to`.
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    for (Nd4jLong idx = 0; idx < length; ++idx) {
        to[idx] = static_cast<T2>(from[idx]);
    }
};
}
#endif /* SHAPE_H_ */
|
intra_pred.c | #ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <float.h>
#include <stdlib.h>
#include <sys/time.h>
/* for fallback to the otherwise obsolete ftime on windows */
#if ! HAVE_GETTIMEOFDAY && HAVE_FTIME
#include <sys/timeb.h>
#endif
#include "cholesky.h"
#include "od_defs.h"
#include "od_covmat.h"
#include "od_filter.h"
#include "od_intra.h"
#include "image_tools.h"
#include "stats_tools.h"
#include "svd.h"
#define USE_SVD (0)
#define BITS_SELECT (0)
#define USE_WEIGHTS (0)
#define SUBTRACT_DC (0)
#define POOLED_COV (1)
#define MAKE_SPARSE (1)
#define DROP_BY_MAG (0)
#define TF_MASKING (1)
#define WRITE_IMAGES (0)
#define PRINT_PROGRESS (0)
#define PRINT_BLOCKS (0)
#define PRINT_COMP (0)
#define PRINT_DROPS (0)
#define PRINT_BETAS (0)
/* Fills tv with the current wall-clock time. Prefers gettimeofday(); on
 * platforms without it (e.g. Windows) falls back to the obsolete ftime(),
 * converting milliseconds to microseconds. Fails at compile time if
 * neither API is available. */
static void od_gettime(struct timeval *tv){
#if HAVE_GETTIMEOFDAY
gettimeofday(tv,NULL);
#elif HAVE_FTIME
struct timeb ft;
ftime(&ft);
tv->tv_sec=ft.time;
tv->tv_usec=ft.millitm*1000;
#else
#error No suitable wall time function available.
#endif
}
typedef struct classify_ctx classify_ctx;
/* Per-thread classification state, accumulated across images. */
struct classify_ctx{
/* number of images processed so far */
int n;
/* statistics for the image currently being processed */
intra_stats st;
/* statistics accumulated over all images */
intra_stats gb;
/* per-mode covariance of the 5 concatenated blocks (4 neighbors + target) */
od_covmat pd[OD_INTRA_NMODES];
/* working buffers for the current image */
image_data img;
#if WRITE_IMAGES
image_files files;
#endif
/* estimated coding cost accumulated by ip_stats_block() */
double bits;
};
/* Initializes a classify_ctx: zero image count, fresh per-image and
 * global statistics, and one covariance accumulator (5*B_SZ*B_SZ wide)
 * per intra mode. */
static void classify_ctx_init(classify_ctx *_this){
  int mode;
  _this->n=0;
  intra_stats_init(&_this->st,B_SZ_LOG);
  intra_stats_init(&_this->gb,B_SZ_LOG);
  for(mode=0;mode<OD_INTRA_NMODES;mode++)od_covmat_init(&_this->pd[mode],5*B_SZ*B_SZ);
}
/* Frees all statistics and covariance storage owned by *_this. */
static void classify_ctx_clear(classify_ctx *_this){
  int mode;
  intra_stats_clear(&_this->st);
  intra_stats_clear(&_this->gb);
  for(mode=0;mode<OD_INTRA_NMODES;mode++)od_covmat_clear(&_this->pd[mode]);
}
/* Resets accumulated state between k-means steps: image count, global
 * statistics, per-mode covariances, and the running bit estimate. */
static void classify_ctx_reset(classify_ctx *_this){
  int mode;
  _this->n=0;
  _this->bits=0;
  intra_stats_reset(&_this->gb);
  for(mode=0;mode<OD_INTRA_NMODES;mode++)od_covmat_reset(&_this->pd[mode]);
}
/* Starts processing a new image: bumps the image counter, clears the
 * per-image stats, and sizes the image (and optional debug-image)
 * buffers for an _nxblocks x _nyblocks grid. */
static void classify_ctx_set_image(classify_ctx *_this,const char *_name,
 int _nxblocks,int _nyblocks){
_this->n++;
intra_stats_reset(&_this->st);
image_data_init(&_this->img,_name,B_SZ_LOG,_nxblocks,_nyblocks);
#if WRITE_IMAGES
image_files_init(&_this->files,_nxblocks,_nyblocks);
#endif
}
/* Releases the per-image buffers allocated by classify_ctx_set_image(). */
static void classify_ctx_clear_image(classify_ctx *_this){
image_data_clear(&_this->img);
#if WRITE_IMAGES
image_files_clear(&_this->files);
#endif
}
typedef struct prob_ctx prob_ctx;
/* Normalized second-moment view of one mode's covariance data. */
struct prob_ctx{
/* per-coefficient scale factors (sqrt of the covariance diagonal) */
double *scale;
/* normalized X^T*X block over the 4 neighbor blocks (4*B_SZ*B_SZ square) */
double *xtx;
/* normalized X^T*Y block (neighbors vs. target coefficients) */
double *xty;
/* borrowed pointers into the source od_covmat (not owned, not freed) */
const double *mean;
const double *cov;
/* residual second moment E^T*E of the fitted predictor */
double *ete;
};
/* Allocates the prob_ctx working buffers; mean/cov stay NULL until
 * prob_ctx_load() points them at an od_covmat. */
static void prob_ctx_init(prob_ctx *_this){
_this->scale=(double *)malloc(sizeof(*_this->scale)*5*B_SZ*B_SZ);
_this->xtx=(double *)malloc(sizeof(*_this->xtx)*4*B_SZ*B_SZ*4*B_SZ*B_SZ);
_this->xty=(double *)malloc(sizeof(*_this->xty)*4*B_SZ*B_SZ*B_SZ*B_SZ);
_this->mean=NULL;
_this->cov=NULL;
_this->ete=(double *)malloc(sizeof(*_this->ete)*B_SZ*B_SZ*B_SZ*B_SZ);
}
/* Releases the prob_ctx buffers. The mean/cov pointers are borrowed from
 * an od_covmat, so they are only cleared, never freed. */
static void prob_ctx_clear(prob_ctx *_this){
  _this->mean=NULL;
  _this->cov=NULL;
  free(_this->ete);
  free(_this->xty);
  free(_this->xtx);
  free(_this->scale);
}
/* Loads one mode's covariance matrix into the prob_ctx: derives per-
 * coefficient scales from the diagonal and fills the scale-normalized
 * X^T*X and X^T*Y blocks. Keeps borrowed pointers to _mat's mean/cov. */
static void prob_ctx_load(prob_ctx *_this,od_covmat *_mat){
int i;
int j;
/* compute the scale factors */
for(i=0;i<5*B_SZ*B_SZ;i++){
_this->scale[i]=sqrt(_mat->cov[i*5*B_SZ*B_SZ+i]);
}
/* normalize X^T*X and X^T*Y */
for(j=0;j<4*B_SZ*B_SZ;j++){
for(i=0;i<4*B_SZ*B_SZ;i++){
_this->xtx[4*B_SZ*B_SZ*j+i]=
_mat->cov[5*B_SZ*B_SZ*j+i]/(_this->scale[j]*_this->scale[i]);
}
for(i=0;i<B_SZ*B_SZ;i++){
_this->xty[B_SZ*B_SZ*j+i]=_mat->cov[5*B_SZ*B_SZ*j+4*B_SZ*B_SZ+i];
_this->xty[B_SZ*B_SZ*j+i]/=_this->scale[j]*_this->scale[4*B_SZ*B_SZ+i];
}
}
_this->mean=_mat->mean;
_this->cov=_mat->cov;
}
/* Computes the residual second-moment matrix of the fitted predictor,
 * E^T*E = Y^T*Y - Y^T*X * beta_1, into _this->ete (B_SZ*B_SZ square).
 * Optionally dumps the matrix in Octave syntax when PRINT_COMP is set. */
static void prob_ctx_comp_error(prob_ctx *_this,od_covmat *_mat,double *_beta_1){
int j;
int i;
for(j=0;j<B_SZ*B_SZ;j++){
for(i=0;i<B_SZ*B_SZ;i++){
int ji;
int l;
int k;
ji=B_SZ*B_SZ*j+i;
l=5*B_SZ*B_SZ*(4*B_SZ*B_SZ+j);
/* E^T*E = Y^T*Y - Y^T*X * beta_1 */
_this->ete[ji]=_mat->cov[l+4*B_SZ*B_SZ+i];
for(k=0;k<4*B_SZ*B_SZ;k++){
_this->ete[ji]-=_mat->cov[l+k]*_beta_1[4*B_SZ*B_SZ*i+k];
}
}
}
#if PRINT_COMP
fprintf(stderr,"ete=[");
for(j=0;j<B_SZ*B_SZ;j++){
fprintf(stderr,"%s",j!=0?";":"");
for(i=0;i<B_SZ*B_SZ;i++){
fprintf(stderr,"%s%- 24.18G",i!=0?",":"",_this->ete[B_SZ*B_SZ*j+i]);
}
}
fprintf(stderr,"];\n");
#endif
}
/* Updates the per-coefficient diversity estimate _b from the diagonal of
 * the residual moment matrix _ete, undoing the per-row/column scaling;
 * _b feeds the per-coefficient rate model used by ip_stats_block(). */
static void update_diversity(const double *_ete,double _b[B_SZ*B_SZ],
const double *_scale){
int v;
int u;
for(v=0;v<B_SZ;v++){
for(u=0;u<B_SZ;u++){
int i;
int ii;
i=B_SZ*v+u;
/* diagonal entry of E^T*E for coefficient (v,u) */
ii=B_SZ*B_SZ*i+i;
_b[i]=sqrt(_scale[v]*_scale[u]*_ete[ii]/2);
}
}
}
#if PRINT_BETAS && POOLED_COV
/* Emits the OD_SATD_WEIGHTS table as a C source fragment: per-coefficient
 * weights in Q8 fixed point, derived from the pooled diversity estimates
 * with the DCT scaling removed. */
static void print_diversity(FILE *_fp,double _b[B_SZ*B_SZ],
const double *_scale){
int j;
int i;
fprintf(_fp,"const ogg_int16_t OD_SATD_WEIGHTS_%dx%d[%d*%d] = {\n",B_SZ,B_SZ,B_SZ,B_SZ);
for(j=0;j<B_SZ;j++){
for(i=0;i<B_SZ;i++){
double b;
b=M_LOG2E/(_b[j*B_SZ+i]/(sqrt(_scale[i]*_scale[j])*INPUT_SCALE));
fprintf(_fp,"%s%3i%s",
i>0?" ":"  ",(int)(b*256+0.5),j==B_SZ-1&&i==B_SZ-1?"":",");
}
fprintf(_fp,"\n");
}
fprintf(_fp,"};\n");
}
#endif
typedef struct solve_ctx solve_ctx;
/* Scratch space for solving the per-coefficient least-squares systems. */
struct solve_ctx{
#if USE_SVD
/* packed X^T*X copy, row pointers and singular values for the SVD path */
double *xtx;
double **xtxp;
double *s;
#else
/* triangular factor, pivots and workspace for the Cholesky path */
double *r;
int *pivot;
double *tau;
double *b;
double *work;
#endif
/* fitted intercepts (B_SZ*B_SZ) and weights (B_SZ*B_SZ x 4*B_SZ*B_SZ) */
double *beta_0;
double *beta_1;
};
/* Allocates least-squares scratch: either the SVD buffers (USE_SVD) or
 * the Cholesky buffers, plus the beta_0/beta_1 result arrays. */
static void solve_ctx_init(solve_ctx *_this){
#if USE_SVD
_this->xtx=(double *)malloc(sizeof(*_this->xtx)*2*4*B_SZ*B_SZ*4*B_SZ*B_SZ);
_this->xtxp=(double **)malloc(sizeof(*_this->xtxp)*2*4*B_SZ*B_SZ);
_this->s=(double *)malloc(sizeof(*_this->s)*4*B_SZ*B_SZ);
#else
_this->r=(double *)malloc(sizeof(*_this->r)*UT_SZ(4*B_SZ*B_SZ,4*B_SZ*B_SZ));
_this->pivot=(int *)malloc(sizeof(*_this->pivot)*4*B_SZ*B_SZ);
_this->tau=(double *)malloc(sizeof(*_this->tau)*4*B_SZ*B_SZ);
_this->b=(double *)malloc(sizeof(*_this->b)*4*B_SZ*B_SZ);
_this->work=(double *)malloc(sizeof(*_this->work)*4*B_SZ*B_SZ);
#endif
_this->beta_0=(double *)malloc(sizeof(*_this->beta_0)*B_SZ*B_SZ);
_this->beta_1=(double *)malloc(sizeof(*_this->beta_1)*B_SZ*B_SZ*4*B_SZ*B_SZ);
}
/* Frees everything allocated by solve_ctx_init(). */
static void solve_ctx_clear(solve_ctx *_this){
#if USE_SVD
free(_this->xtx);
free(_this->xtxp);
free(_this->s);
#else
free(_this->r);
free(_this->pivot);
free(_this->tau);
free(_this->b);
free(_this->work);
#endif
free(_this->beta_0);
free(_this->beta_1);
}
/* Solves the masked least-squares system for output coefficient _y:
 * beta_1 = (X^T*X)^-1 * X^T*Y restricted to the active inputs in _mask,
 * and beta_0 = Ym - Xm * beta_1. Masked-out inputs get beta_1 = 0.
 * Uses either an SVD pseudoinverse or a pivoted Cholesky factorization
 * depending on USE_SVD. Results are rescaled back to unnormalized units. */
static void solve(const prob_ctx *_prob,solve_ctx *_sol,int _y,int *_mask,
double *_beta_0,double *_beta_1){
int nmi;
int mi[4*B_SZ*B_SZ];
int i;
int j;
#if !USE_SVD
int rank;
#endif
/* gather the indices of the active (unmasked) input coefficients */
nmi=0;
for(i=0;i<4*B_SZ*B_SZ;i++){
if(_mask[i]){
mi[nmi]=i;
nmi++;
}
_beta_1[_y*4*B_SZ*B_SZ+i]=0;
}
#if USE_SVD
for(j=0;j<nmi;j++){
for(i=0;i<nmi;i++){
_sol->xtx[4*B_SZ*B_SZ*j+i]=_prob->xtx[4*B_SZ*B_SZ*mi[j]+mi[i]];
}
}
for(i=0;i<2*nmi;i++){
_sol->xtxp[i]=&_sol->xtx[4*B_SZ*B_SZ*i];
}
svd_pseudoinverse(_sol->xtxp,_sol->s,nmi,nmi);
#else
/* pack the active submatrix in upper-triangular storage and solve */
for(j=0;j<nmi;j++){
for(i=j;i<nmi;i++){
_sol->r[UT_IDX(j,i,nmi)]=_prob->xtx[4*B_SZ*B_SZ*mi[j]+mi[i]];
}
_sol->b[j]=_prob->xty[B_SZ*B_SZ*mi[j]+_y];
}
rank=cholesky(_sol->r,_sol->pivot,DBL_EPSILON,nmi);
chdecomp(_sol->r,_sol->tau,rank,nmi);
chsolve(_sol->r,_sol->pivot,_sol->tau,_sol->b,_sol->b,_sol->work,rank,nmi);
#endif
/* compute beta_1 = (X^T*X)^-1 * X^T*Y and beta_0 = Ym - Xm * beta_1 */
_beta_0[_y]=_prob->mean[4*B_SZ*B_SZ+_y];
for(j=0;j<nmi;j++){
int yj;
yj=_y*4*B_SZ*B_SZ+mi[j];
#if USE_SVD
_beta_1[yj]=0;
for(i=0;i<nmi;i++){
_beta_1[yj]+=_sol->xtx[4*B_SZ*B_SZ*j+i]*_prob->xty[B_SZ*B_SZ*mi[i]+_y];
}
#else
_beta_1[yj]=_sol->b[j];
#endif
/* undo the normalization applied by prob_ctx_load() */
_beta_1[yj]*=_prob->scale[4*B_SZ*B_SZ+_y]/_prob->scale[mi[j]];
_beta_0[_y]-=_prob->mean[mi[j]]*_beta_1[yj];
}
}
/* Residual variance of output coefficient _y when predicted from the
 * masked set of neighbor coefficients: fits the predictor with solve()
 * and returns var(y) - cov(y,x) * beta_1 over the active inputs. */
static double comp_error(const prob_ctx *_prob,solve_ctx *_sol,int _y,
int *_mask){
double err;
int j;
int i;
solve(_prob,_sol,_y,_mask,_sol->beta_0,_sol->beta_1);
j=4*B_SZ*B_SZ+_y;
err=_prob->cov[5*B_SZ*B_SZ*j+j];
for(i=0;i<4*B_SZ*B_SZ;i++){
if(_mask[i]){
err-=_prob->cov[5*B_SZ*B_SZ*j+i]*_sol->beta_1[4*B_SZ*B_SZ*_y+i];
}
}
return(err);
}
/* For output coefficient _y, finds the active multiplier whose removal
 * increases the residual error the least. Evaluates each candidate drop
 * in parallel (one private mask copy per OpenMP thread), stores the
 * best relative error ratio in *_delta_pg and returns the coefficient
 * index to drop, or -1 when no candidate exists. When SUBTRACT_DC is
 * set, DC inputs of the DC output are made undroppable. */
static int comp_delta_pg(const prob_ctx *_prob,solve_ctx _sol[NUM_PROCS],int _y,
int _mask[4*B_SZ*B_SZ],double *_delta_pg){
double s;
int i;
int j;
int nmi;
int mi[4*B_SZ*B_SZ];
int mask[NUM_PROCS][4*B_SZ*B_SZ];
double delta_pg[4*B_SZ*B_SZ];
/* collect active indices and give each thread its own mask copy */
nmi=0;
for(i=0;i<4*B_SZ*B_SZ;i++){
if(_mask[i]){
mi[nmi]=i;
nmi++;
}
for(j=0;j<NUM_PROCS;j++){
mask[j][i]=_mask[i];
}
}
/* baseline error with the full mask; candidates are measured against it */
s=1/comp_error(_prob,&_sol[0],_y,_mask);
#pragma omp parallel for schedule(dynamic)
for(i=0;i<nmi;i++){
int tid;
tid=OD_OMP_GET_THREAD;
mask[tid][mi[i]]=0;
delta_pg[i]=comp_error(_prob,&_sol[tid],_y,mask[tid])*s;
mask[tid][mi[i]]=1;
}
#if SUBTRACT_DC
if(_y==0) {
for(i=0;i<nmi;i++){
if(mi[i]%(B_SZ*B_SZ)==0){
delta_pg[i]=UINT_MAX;
}
}
}
#endif
/* pick the candidate with the smallest error increase */
j=-1;
for(i=0;i<nmi;i++){
if(j==-1||delta_pg[i]<delta_pg[j]){
j=i;
}
}
if(j==-1){
return j;
}
*_delta_pg=delta_pg[j];
return(mi[j]);
}
/* Elapsed wall time between _start and _stop, in milliseconds
 * (negative when _stop precedes _start). */
static long timing(const struct timeval *_start,const struct timeval *_stop){
  long sec_ms;
  long usec_ms;
  sec_ms=(_stop->tv_sec-_start->tv_sec)*1000L;
  usec_ms=(_stop->tv_usec-_start->tv_usec)/1000L;
  return sec_ms+usec_ms;
}
/* Refits all B_SZ*B_SZ per-coefficient predictors for the current mode,
 * first removing `_drop` multipliers from _mask, either by smallest
 * coefficient magnitude (DROP_BY_MAG) or by smallest prediction-gain
 * loss (comp_delta_pg). Final betas land in _sol[0].beta_0/beta_1
 * (note: `_sol->x` below means `_sol[0].x`). Optionally dumps the
 * matrices in Octave syntax when PRINT_COMP is set. */
static void comp_predictors(const prob_ctx *_prob,solve_ctx _sol[NUM_PROCS],
int _drop,int _mask[B_SZ*B_SZ*4*B_SZ*B_SZ]){
int i;
int j;
#if PRINT_COMP
fprintf(stderr,"xtx=[");
for(j=0;j<4*B_SZ*B_SZ;j++){
fprintf(stderr,"%s",j!=0?";":"");
for(i=0;i<4*B_SZ*B_SZ;i++){
fprintf(stderr,"%s%- 24.18G",i!=0?",":"",_prob->xtx[4*B_SZ*B_SZ*j+i]);
}
}
fprintf(stderr,"];\n");
fprintf(stderr,"xty=[");
for(j=0;j<4*B_SZ*B_SZ;j++){
fprintf(stderr,"%s",j!=0?";":"");
for(i=0;i<B_SZ*B_SZ;i++){
fprintf(stderr,"%s%- 24.18G",i!=0?",":"",_prob->xty[B_SZ*B_SZ*j+i]);
}
}
fprintf(stderr,"];\n");
#endif
if(_drop>0){
#if DROP_BY_MAG
double min[B_SZ*B_SZ];
int idx[B_SZ*B_SZ];
/* fit once per output, then track the smallest active |beta_1| */
#pragma omp parallel for schedule(dynamic)
for(j=0;j<B_SZ*B_SZ;j++){
int tid;
tid=OD_OMP_GET_THREAD;
solve(_prob,&_sol[tid],j,&_mask[j*4*B_SZ*B_SZ],_sol->beta_0,_sol->beta_1);
}
for(j=0;j<B_SZ*B_SZ;j++){
idx[j]=-1;
min[j]=UINT_MAX;
for(i=0;i<4*B_SZ*B_SZ;i++){
if(_mask[4*B_SZ*B_SZ*j+i]&&fabs(_sol->beta_1[4*B_SZ*B_SZ*j+i])<min[j]){
idx[j]=i;
min[j]=fabs(_sol->beta_1[4*B_SZ*B_SZ*j+i]);
}
}
}
/* repeatedly drop the globally smallest multiplier */
while(_drop-->0){
j=-1;
for(i=0;i<B_SZ*B_SZ;i++){
if(idx[i]!=-1&&(j==-1||min[i]<min[j])){
j=i;
}
}
#if PRINT_DROPS
printf("Dropping (%2i,%2i) with beta_1=%g\n",j,idx[j],min[j]);
fflush(stdout);
#endif
_mask[4*B_SZ*B_SZ*j+idx[j]]=0;
idx[j]=-1;
min[j]=UINT_MAX;
for(i=0;i<4*B_SZ*B_SZ;i++){
if(_mask[4*B_SZ*B_SZ*j+i]&&fabs(_sol->beta_1[4*B_SZ*B_SZ*j+i])<min[j]){
idx[j]=i;
min[j]=fabs(_sol->beta_1[4*B_SZ*B_SZ*j+i]);
}
}
}
#else
double delta_pg[B_SZ*B_SZ];
int idx[B_SZ*B_SZ];
/* per output, find the cheapest multiplier to drop, then repeatedly
   drop the globally cheapest one and re-evaluate that output */
for(j=0;j<B_SZ*B_SZ;j++){
idx[j]=comp_delta_pg(_prob,_sol,j,&_mask[j*4*B_SZ*B_SZ],&delta_pg[j]);
}
while(_drop-->0){
j=-1;
for(i=0;i<B_SZ*B_SZ;i++){
if(idx[i]!=-1&&(j==-1||delta_pg[i]<delta_pg[j])){
j=i;
}
}
#if PRINT_DROPS
printf("Dropping (%2i,%2i) cost Pg=%g\n",j,idx[j],10*log10(delta_pg[j]));
fflush(stdout);
#endif
_mask[j*4*B_SZ*B_SZ+idx[j]]=0;
idx[j]=comp_delta_pg(_prob,_sol,j,&_mask[j*4*B_SZ*B_SZ],&delta_pg[j]);
}
#endif
}
/* final fit with the (possibly reduced) masks */
#pragma omp parallel for schedule(dynamic)
for(j=0;j<B_SZ*B_SZ;j++){
int tid;
tid=OD_OMP_GET_THREAD;
solve(_prob,&_sol[tid],j,&_mask[j*4*B_SZ*B_SZ],_sol->beta_0,_sol->beta_1);
}
#if PRINT_COMP
fprintf(stderr,"beta_1=[");
for(j=0;j<4*B_SZ*B_SZ;j++){
fprintf(stderr,"%s",j!=0?";":"");
for(i=0;i<B_SZ*B_SZ;i++){
fprintf(stderr,"%s%- 24.18G",i!=0?",":"",_sol->beta_1[4*B_SZ*B_SZ*i+j]);
}
}
fprintf(stderr,"];\n");
fprintf(stderr,"beta_0=[");
for(i=0;i<B_SZ*B_SZ;i++){
fprintf(stderr,"%s%- 24.18G",i!=0?",":"",_sol->beta_0[i]);
}
fprintf(stderr,"];\n");
#endif
}
#if PRINT_PROGRESS
/* Diagnostic: reports which OpenMP thread entered processing stage _proc. */
static void print_progress(FILE *_fp,const char *_proc){
int tid;
tid=OD_OMP_GET_THREAD;
fprintf(_fp,"thread %i in %s\n",tid,_proc);
}
#endif
#if MASK_BLOCKS
/* Block hook: on the first block of the image only, computes the
 * image-wide block mask from the raw data. */
static void ip_mask_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_mask_block");
}
#endif
if(_bi==0&&_bj==0){
classify_ctx *ctx;
ctx=(classify_ctx *)_ctx;
image_data_mask(&ctx->img,_data,_stride);
}
}
#endif
/* Block hook: runs the pre-filter stage on block (_bi,_bj). */
static void ip_pre_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_pre_block");
}
#endif
ctx=(classify_ctx *)_ctx;
image_data_pre_block(&ctx->img,_data,_stride,_bi,_bj);
}
/* Block hook: forward DCT of block (_bi,_bj); operates on ctx->img state,
 * so the raw _data/_stride arguments are unused. */
static void ip_fdct_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
(void)_data;
(void)_stride;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_fdct_block");
}
#endif
ctx=(classify_ctx *)_ctx;
image_data_fdct_block(&ctx->img,_bi,_bj);
}
#if TF_BLOCKS
/* Block hook: applies the TF (transform-filter) stage to block (_bi,_bj);
 * only compiled in when TF_BLOCKS is enabled. */
static void ip_tf_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
(void)_data;
(void)_stride;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_tf_block");
}
#endif
ctx=(classify_ctx *)_ctx;
image_data_tf_block(&ctx->img,_bi,_bj);
}
#endif
/* Block hook: gathers the 4 causal neighbor blocks (UL, U, UR from the
 * row above; L from the left) plus the current block into a 5*B_SZ*B_SZ
 * vector and adds it, weighted, to the covariance accumulator of the
 * block's selected mode. Skips masked-out blocks. */
static void ip_add_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
od_coeff *block;
int by;
int bx;
int j;
int i;
double buf[5*B_SZ*B_SZ];
int mode;
double weight;
(void)_data;
(void)_stride;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_add_block");
}
#endif
ctx=(classify_ctx *)_ctx;
#if MASK_BLOCKS
if(!ctx->img.mask[ctx->img.nxblocks*_bj+_bi]){
return;
}
#endif
/* by=0 visits UL, U, UR (bx=0..2); by=1 visits L (bx=0 only);
   3*by+bx yields slots 0..3 of buf */
for(by=0;by<=1;by++){
for(bx=0;bx<=(1-by)<<1;bx++){
#if TF_BLOCKS
block=&ctx->img.tf[ctx->img.fdct_stride*B_SZ*(_bj+by)+B_SZ*(_bi+bx)];
#else
block=&ctx->img.fdct[ctx->img.fdct_stride*B_SZ*(_bj+by)+B_SZ*(_bi+bx)];
#endif
for(j=0;j<B_SZ;j++){
for(i=0;i<B_SZ;i++){
buf[B_SZ*B_SZ*(3*by+bx)+j*B_SZ+i]=block[ctx->img.fdct_stride*j+i];
}
}
}
}
/* slot 4: the current block at (_bj+1,_bi+1) from the raw fdct plane */
block=&ctx->img.fdct[ctx->img.fdct_stride*B_SZ*(_bj+1)+B_SZ*(_bi+1)];
for(j=0;j<B_SZ;j++){
for(i=0;i<B_SZ;i++){
buf[B_SZ*B_SZ*4+j*B_SZ+i]=block[ctx->img.fdct_stride*j+i];
}
}
#if SUBTRACT_DC
/* remove the average neighbor DC from the target block's DC */
for(i=0;i<4;i++){
buf[4*B_SZ*B_SZ]-=0.25*buf[i*B_SZ*B_SZ];
}
#endif
mode=ctx->img.mode[ctx->img.nxblocks*_bj+_bi];
weight=ctx->img.weight[ctx->img.nxblocks*_bj+_bi];
od_covmat_add(&ctx->pd[mode],buf,weight);
}
#if PRINT_BLOCKS
/* Block hook: dumps block (_bi,_bj) data to stderr for debugging. */
static void ip_print_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
(void)_data;
(void)_stride;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_print_block");
}
#endif
ctx=(classify_ctx *)_ctx;
image_data_print_block(&ctx->img,_bi,_bj,stderr);
}
#endif
/* Block hook: computes the intra prediction for block (_bi,_bj); works on
 * ctx->img state, so _data/_stride are unused. */
static void ip_pred_block(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  classify_ctx *ctx;
  (void)_data;
  (void)_stride;
#if PRINT_PROGRESS
  if(_bi==0&&_bj==0){
    /* Fix: print_progress() takes (FILE *,const char *); the previous
       single-argument call did not compile when PRINT_PROGRESS was 1. */
    print_progress(stdout,"ip_pred_block");
  }
#endif
  ctx=(classify_ctx *)_ctx;
  image_data_pred_block(&ctx->img,_bi,_bj);
}
/* Block hook: inverse DCT of block (_bi,_bj); works on ctx->img state,
 * so _data/_stride are unused. */
static void ip_idct_block(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  classify_ctx *ctx;
  (void)_data;
  (void)_stride;
#if PRINT_PROGRESS
  if(_bi==0&&_bj==0){
    /* Fix: print_progress() takes (FILE *,const char *); the previous
       single-argument call did not compile when PRINT_PROGRESS was 1. */
    print_progress(stdout,"ip_idct_block");
  }
#endif
  ctx=(classify_ctx *)_ctx;
  image_data_idct_block(&ctx->img,_bi,_bj);
}
/* Block hook: runs the post-filter stage on block (_bi,_bj); works on
 * ctx->img state, so _data/_stride are unused. */
static void ip_post_block(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  classify_ctx *ctx;
  (void)_data;
  (void)_stride;
#if PRINT_PROGRESS
  if(_bi==0&&_bj==0){
    /* Fix: print_progress() takes (FILE *,const char *); the previous
       single-argument call did not compile when PRINT_PROGRESS was 1. */
    print_progress(stdout,"ip_post_block");
  }
#endif
  ctx=(classify_ctx *)_ctx;
  image_data_post_block(&ctx->img,_bi,_bj);
}
double b[OD_INTRA_NMODES][B_SZ*B_SZ];
/* Block hook: accumulates prediction statistics for block (_bi,_bj) and
 * adds an estimated coding cost (in bits) for its residual to ctx->bits,
 * using the per-mode diversity model in the global `b` table. Skips
 * masked-out blocks. */
static void ip_stats_block(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  classify_ctx *ctx;
#if PRINT_PROGRESS
  if(_bi==0&&_bj==0){
    /* Fix: print_progress() takes (FILE *,const char *); the previous
       single-argument call did not compile when PRINT_PROGRESS was 1. */
    print_progress(stdout,"ip_stats_block");
  }
#endif
  ctx=(classify_ctx *)_ctx;
#if MASK_BLOCKS
  if(!ctx->img.mask[ctx->img.nxblocks*_bj+_bi]){
    return;
  }
#endif
  image_data_stats_block(&ctx->img,_data,_stride,_bi,_bj,&ctx->st);
  {
    od_coeff *block;
    int bstride;
    double *pred;
    int pstride;
    int mode;
    double *od_scale;
    int j;
    int i;
    block=&ctx->img.fdct[ctx->img.fdct_stride*B_SZ*(_bj+1)+B_SZ*(_bi+1)];
    bstride=ctx->img.fdct_stride;
    pred=&ctx->img.pred[ctx->img.pred_stride*B_SZ*_bj+B_SZ*_bi];
    pstride=ctx->img.pred_stride;
    mode=ctx->img.mode[ctx->img.nxblocks*_bj+_bi];
    od_scale=OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0];
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        double res;
        /* NOTE(review): abs() operates on ints; assumes od_coeff is an
           integer type -- confirm, otherwise this should be fabs(). */
        res=sqrt(od_scale[j]*od_scale[i])*
         abs(block[bstride*j+i]-(od_coeff)floor(pred[pstride*j+i]+0.5));
        /* one sign bit plus the rate model for diversity b */
        ctx->bits+=1+OD_LOG2(b[mode][j*B_SZ+i])+M_LOG2E/b[mode][j*B_SZ+i]*res;
      }
    }
  }
}
#if WRITE_IMAGES
/* Block hook: writes block (_bi,_bj) into the debug image set; only
 * compiled in when WRITE_IMAGES is enabled. */
static void ip_files_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_files_block");
}
#endif
ctx=(classify_ctx *)_ctx;
image_data_files_block(&ctx->img,_data,_stride,_bi,_bj,&ctx->files);
}
#endif
int step;
/* Block hook (first pass): selects a VP8 intra mode for block (_bi,_bj)
 * from the raw pixels and stores mode and weight in the image maps.
 * NOTE(review): the progress label says "ip_vp8_mode_block" while the
 * function is vp8_mode_block -- it is a diagnostic string only. */
static void vp8_mode_block(void *_ctx,const unsigned char *_data,int _stride,
int _bi,int _bj){
classify_ctx *ctx;
unsigned char *mode;
double *weight;
#if PRINT_PROGRESS
if(_bi==0&&_bj==0){
print_progress(stdout,"ip_vp8_mode_block");
}
#endif
ctx=(classify_ctx *)_ctx;
mode=&ctx->img.mode[ctx->img.nxblocks*_bj+_bi];
weight=&ctx->img.weight[ctx->img.nxblocks*_bj+_bi];
*mode=vp8_select_mode(_data,_stride,weight);
#if USE_WEIGHTS
/* keep mode-0 blocks at unit weight even when weighting is on */
if(*mode==0){
*weight=1;
}
#else
*weight=1;
#endif
}
/* Block hook (k-means passes): selects the Daala intra mode for block
 * (_bi,_bj) from its transform coefficients and stores the mode and
 * weight in the image maps. With BITS_SELECT, later steps switch from
 * SATD-based to bit-estimate-based selection. */
static void od_mode_block(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  classify_ctx *ctx;
  unsigned char *mode;
  od_coeff block[5*B_SZ*B_SZ];
  double *weight;
  (void)_data;
  (void)_stride;
#if PRINT_PROGRESS
  if(_bi==0&&_bj==0){
    /* Fix: print_progress() takes (FILE *,const char *); the previous
       single-argument call did not compile when PRINT_PROGRESS was 1. */
    print_progress(stdout,"od_mode_block");
  }
#endif
  ctx=(classify_ctx *)_ctx;
  mode=&ctx->img.mode[ctx->img.nxblocks*_bj+_bi];
  image_data_load_block(&ctx->img,_bi,_bj,block);
  weight=&ctx->img.weight[ctx->img.nxblocks*_bj+_bi];
#if BITS_SELECT
  if(step==1){
    *mode=od_select_mode_satd(block,weight,ctx->img.b_sz_log);
  }
  else{
    *mode=od_select_mode_bits(block,weight,b);
  }
#else
  *mode=od_select_mode_satd(block,weight,ctx->img.b_sz_log);
#endif
#if USE_WEIGHTS
  /* keep mode-0 blocks at unit weight even when weighting is on */
  if(*mode==0){
    *weight=1;
  }
#else
  *weight=1;
#endif
}
/* Per-image prologue for the first (VP8) pass: logs the image name and
 * sizes the context's buffers for its block grid. */
static int init_start(void *_ctx,const char *_name,
const video_input_info *_info,int _pli,int _nxblocks,int _nyblocks){
classify_ctx *ctx;
(void)_info;
(void)_pli;
#if PRINT_PROGRESS
print_progress(stdout,"init_start");
#endif
fprintf(stdout,"%s\n",_name);
fflush(stdout);
ctx=(classify_ctx *)_ctx;
classify_ctx_set_image(ctx,_name,_nxblocks,_nyblocks);
return EXIT_SUCCESS;
}
/* Per-image epilogue for the first pass: saves the mode/weight map and
 * frees the per-image buffers. The commented-out block kept stats
 * printing that is disabled for this pass. */
static int init_finish(void *_ctx){
classify_ctx *ctx;
#if PRINT_PROGRESS
print_progress(stdout,"init_finish");
#endif
ctx=(classify_ctx *)_ctx;
/*intra_stats_combine(&ctx->gb,&ctx->st);
intra_stats_correct(&ctx->st);
fprintf(stdout,"%s\n",ctx->img.name);
intra_stats_print(&ctx->st,"Daala Intra Predictors",OD_SCALE);
fflush(stdout);*/
image_data_save_map(&ctx->img);
classify_ctx_clear_image(ctx);
return EXIT_SUCCESS;
}
/* Per-block pipeline for the first (VP8-classified) pass, applied in
 * order by ne_apply_to_blocks(): preprocessing and the forward DCT must
 * run before mode selection and covariance accumulation. */
const block_func INIT[]={
#if MASK_BLOCKS
ip_mask_block,
#endif
ip_pre_block,
ip_fdct_block,
#if TF_BLOCKS
ip_tf_block,
#endif
vp8_mode_block,
ip_add_block,
#if PRINT_BLOCKS
ip_print_block,
#endif
};
/* number of stages actually compiled into INIT */
const int NINIT=sizeof(INIT)/sizeof(*INIT);
/* Per-image prologue for the prediction passes: sizes the buffers and
 * reloads the mode/weight map saved by the previous pass. */
static int pred_start(void *_ctx,const char *_name,
const video_input_info *_info,int _pli,int _nxblocks,int _nyblocks){
classify_ctx *ctx;
(void)_info;
(void)_pli;
#if PRINT_PROGRESS
print_progress(stdout,"pred_start");
#endif
ctx=(classify_ctx *)_ctx;
classify_ctx_set_image(ctx,_name,_nxblocks,_nyblocks);
image_data_load_map(&ctx->img);
return EXIT_SUCCESS;
}
/* Per-image epilogue for the prediction passes: merges this image's
 * statistics into the global totals, prints them, optionally writes the
 * debug images, then saves the mode/weight map and frees the buffers. */
static int pred_finish(void *_ctx){
  classify_ctx *ctx;
#if WRITE_IMAGES
  char suffix[16];
#endif
#if PRINT_PROGRESS
  print_progress(stdout,"pred_finish");
#endif
  ctx=(classify_ctx *)_ctx;
  intra_stats_combine(&ctx->gb,&ctx->st);
  intra_stats_correct(&ctx->st);
  fprintf(stdout,"%s\n",ctx->img.name);
  intra_stats_print(&ctx->st,"Daala Intra Predictors",
   OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0]);
  fflush(stdout);
#if WRITE_IMAGES
  /* Fix: bound the write; sprintf could overflow the 16-byte suffix for
     very large step counts. */
  snprintf(suffix,sizeof(suffix),"-step%02i",step);
  image_files_write(&ctx->files,ctx->img.name,suffix);
#endif
  image_data_save_map(&ctx->img);
  classify_ctx_clear_image(ctx);
  return EXIT_SUCCESS;
}
/* Per-block pipeline for the Daala-classified prediction passes, applied
 * in order by ne_apply_to_blocks(): transform, mode selection, covariance
 * accumulation, prediction, reconstruction, then statistics. */
const block_func PRED[]={
#if MASK_BLOCKS
ip_mask_block,
#endif
ip_pre_block,
ip_fdct_block,
#if TF_BLOCKS
ip_tf_block,
#endif
od_mode_block,
ip_add_block,
ip_pred_block,
ip_stats_block,
ip_idct_block,
ip_post_block,
#if WRITE_IMAGES
ip_files_block,
#endif
};
/* number of stages actually compiled into PRED */
const int NPRED=sizeof(PRED)/sizeof(*PRED);
#define PADDING (4*B_SZ)
#if PADDING<3*B_SZ
# error "PADDING must be at least 3*B_SZ"
#endif
#define INIT_STEPS (10)
#if B_SZ==4
# define DROPS_PER_STEP (16)
#elif B_SZ==8
# define DROPS_PER_STEP (64)
#elif B_SZ==16
# define DROPS_PER_STEP (256)
#else
# error "Unsupported block size."
#endif
/*Train Daala's intra predictors.  A first pass over the input images
  uses VP8 mode selection (INIT pipeline) to gather per-mode covariance
  data; then k-means-style refinement steps (PRED pipeline, Daala mode
  selection) re-fit the per-mode linear predictors, and after INIT_STEPS
  steps optionally sparsify them by dropping DROPS_PER_STEP multipliers
  per step until only 4*B_SZ*B_SZ taps remain.*/
int main(int _argc,const char *_argv[]){
classify_ctx *cls;
int i;
int j;
ne_filter_params_init();
vp8_scale_init(VP8_SCALE[B_SZ_LOG-OD_LOG_BSIZE0],B_SZ_LOG);
od_scale_init(OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0],B_SZ_LOG);
#if WRITE_IMAGES
intra_map_colors(COLORS,OD_INTRA_NMODES);
#endif
/*NOTE(review): malloc result is unchecked; failure would crash in
  classify_ctx_init below.*/
cls=(classify_ctx *)malloc(sizeof(*cls)*NUM_PROCS);
for(i=0;i<NUM_PROCS;i++){
classify_ctx_init(&cls[i]);
}
od_intra_init();
OD_OMP_SET_THREADS(NUM_PROCS);
/* First pass across images uses VP8 mode selection. */
ne_apply_to_blocks(cls,sizeof(*cls),0x1,PADDING,init_start,NINIT,INIT,
 init_finish,_argc,_argv);
/*Fold the per-worker block counts into worker 0.*/
for(i=1;i<NUM_PROCS;i++){
cls[0].n+=cls[i].n;
}
/*Only train if the first pass actually classified some blocks.*/
if(cls[0].n>0){
prob_ctx prob;
solve_ctx sol[NUM_PROCS];
od_covmat ete;
int *mask;
struct timeval start;
struct timeval stop;
prob_ctx_init(&prob);
for(i=0;i<NUM_PROCS;i++){
solve_ctx_init(&sol[i]);
}
od_covmat_init(&ete,B_SZ*B_SZ);
/*Per-mode mask of which of the 4*B_SZ*B_SZ neighbor coefficients may
  feed each of the B_SZ*B_SZ predicted coefficients.
  NOTE(review): malloc result is unchecked.*/
mask=(int *)malloc(sizeof(*mask)*OD_INTRA_NMODES*B_SZ*B_SZ*4*B_SZ*B_SZ);
#if TF_BLOCKS && TF_MASKING
/*TF masking: restrict each predictor to the 4-coefficient border bands
  of the neighboring blocks (UL corner, U/UR bottom rows, L right
  columns).*/
for(j=0;j<OD_INTRA_NMODES;j++){
int *mode_mask;
int *coeff_mask;
int u;
int v;
mode_mask=&mask[B_SZ*B_SZ*4*B_SZ*B_SZ*j];
for(i=0;i<B_SZ*B_SZ;i++){
coeff_mask=&mode_mask[4*B_SZ*B_SZ*i];
for(v=0;v<B_SZ;v++){
for(u=0;u<B_SZ;u++){
/* UL */
coeff_mask[0*B_SZ*B_SZ+B_SZ*v+u]=v>=B_SZ-4&&u>=B_SZ-4;
/* U */
coeff_mask[1*B_SZ*B_SZ+B_SZ*v+u]=v>=B_SZ-4;
/* UR */
coeff_mask[2*B_SZ*B_SZ+B_SZ*v+u]=v>=B_SZ-4;
/* L */
coeff_mask[3*B_SZ*B_SZ+B_SZ*v+u]=u>=B_SZ-4;
}
}
}
}
#else
/*No TF masking: every coefficient may use every neighbor coefficient.*/
for(i=0;i<OD_INTRA_NMODES*B_SZ*B_SZ*4*B_SZ*B_SZ;i++){
mask[i]=1;
}
#endif
od_gettime(&start);
/* Each k-means step uses Daala mode selection. */
for(step=1;;step++){
int mults;
int drops;
#if TF_BLOCKS && TF_MASKING
mults=(B_SZ/4*3+1)*16*B_SZ*B_SZ;
#else
mults=B_SZ*B_SZ*4*B_SZ*B_SZ;
#endif
drops=0;
/*After the initial dense steps, start dropping multipliers to
  sparsify the predictors (unless MAKE_SPARSE is disabled).*/
if(step>INIT_STEPS){
#if !MAKE_SPARSE
break;
#endif
mults-=DROPS_PER_STEP*(step-INIT_STEPS);
drops=DROPS_PER_STEP;
}
printf("Starting Step %02i (%i mults / block)\n",step,mults);
for(j=0;j<OD_INTRA_NMODES;j++){
int *mode_mask;
/* Combine the gathered prediction data. */
for(i=1;i<NUM_PROCS;i++){
od_covmat_combine(&cls[0].pd[j],&cls[i].pd[j]);
}
prob_ctx_load(&prob,&cls[0].pd[j]);
/* Update predictor model based on mults and drops. */
#if PRINT_DROPS
if(drops>0){
printf("Mode %i\n",j);
fflush(stdout);
}
#endif
mode_mask=&mask[B_SZ*B_SZ*4*B_SZ*B_SZ*j];
comp_predictors(&prob,sol,drops,mode_mask);
/* Compute residual covariance for each mode. */
prob_ctx_comp_error(&prob,&cls[0].pd[j],sol->beta_1);
#if ZERO_MEAN
{
double mean[B_SZ*B_SZ];
for(i=0;i<B_SZ*B_SZ;i++){
mean[i]=0;
}
od_covmat_update(&ete,prob.ete,mean,cls[0].pd[j].w);
}
#else
od_covmat_update(&ete,prob.ete,sol->beta_0,cls[0].pd[j].w);
#endif
#if !POOLED_COV
/*Per-mode residual covariance drives the mode-diversity metric.*/
od_covmat_correct(&ete);
update_diversity(ete.cov,b[j],OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0]);
od_covmat_reset(&ete);
#endif
#if SUBTRACT_DC
for(i=0;i<4;i++){
OD_ASSERT(mode_mask[i*B_SZ*B_SZ]);
sol->beta_1[i*B_SZ*B_SZ]+=0.25;
}
#endif
update_predictors(j,sol->beta_0,sol->beta_1,mode_mask);
}
#if POOLED_COV
/*Pooled residual covariance shared across all modes.*/
od_covmat_correct(&ete);
for(j=0;j<OD_INTRA_NMODES;j++){
update_diversity(ete.cov,b[j],OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0]);
}
od_covmat_reset(&ete);
#endif
#if PRINT_BETAS
fprintf(stderr,"Finished Step %02i\n",step);
print_predictors(stderr);
#if POOLED_COV
print_diversity(stderr,b[0],OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0]);
#endif
#endif
/* Reset the prediction data. */
for(i=0;i<NUM_PROCS;i++){
classify_ctx_reset(&cls[i]);
}
/* Reclassify based on the new model. */
ne_apply_to_blocks(cls,sizeof(*cls),0x1,PADDING,pred_start,NPRED,PRED,
 pred_finish,_argc,_argv);
od_gettime(&stop);
printf("Finished Step %02i (%lims)\n",step,timing(&start,&stop));
start=stop;
/* Combine the gathered intra stats. */
for(i=1;i<NUM_PROCS;i++){
intra_stats_combine(&cls[0].gb,&cls[i].gb);
cls[0].bits+=cls[i].bits;
}
printf("Step %02i Total Bits %-24.18G\n",step,cls[0].bits);
intra_stats_correct(&cls[0].gb);
intra_stats_print(&cls[0].gb,"Daala Intra Predictors",
 OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0]);
/*Stop once sparsification has reduced each predictor to
  4*B_SZ*B_SZ multipliers.*/
if (mults==4*B_SZ*B_SZ) {
break;
}
}
prob_ctx_clear(&prob);
for(i=0;i<NUM_PROCS;i++){
solve_ctx_clear(&sol[i]);
}
od_covmat_clear(&ete);
free(mask);
}
for(i=0;i<NUM_PROCS;i++){
classify_ctx_clear(&cls[i]);
}
free(cls);
od_intra_clear();
return EXIT_SUCCESS;
}
|
doall2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation
*/
/*Shared 100x100 array incremented element-wise by the parallel loop.*/
int a[100][100];
int main()
{
int i,j;
/*The inner index j must be privatized: without private(j) all threads
  would share one j and race on it.  With it, each a[i][j] is written by
  exactly one thread, so this loop nest is race-free.*/
#pragma omp parallel for private(j)
for (i=0;i<100;i++)
for (j=0;j<100;j++)
a[i][j]=a[i][j]+1;
return 0;
}
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include <dynmat.h>
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k);
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const int num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3]);
/*Fill dynamical_matrix[num_patom*3][num_patom*3] (interleaved real,imag)
  with the mass-weighted Fourier transform of the force constants fc at
  wave vector q, optionally adding the non-analytical charge_sum term
  (pass NULL to skip it).  Both branches compute identical results via
  get_dynmat_ij; with_openmp merely selects a flat, parallelized loop
  over the (i,j) atom pairs.  Always returns 0.*/
int dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
 const int num_patom,
 const int num_satom,
 const double *fc,
 const double q[3],
 PHPYCONST double (*svecs)[27][3],
 const int *multi,
 const double *mass,
 const int *s2p_map,
 const int *p2s_map,
 PHPYCONST double (*charge_sum)[3][3],
 const int with_openmp)
{
int i, j, ij;
if (with_openmp) {
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
 num_patom,
 num_satom,
 fc,
 q,
 svecs,
 multi,
 mass,
 s2p_map,
 p2s_map,
 charge_sum,
 ij / num_patom, /* i */
 ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
 num_patom,
 num_satom,
 fc,
 q,
 svecs,
 multi,
 mass,
 s2p_map,
 p2s_map,
 charge_sum,
 i,
 j);
}
}
}
/*Remove the numerical asymmetry left by the summation order.*/
make_Hermitian(dynamical_matrix, num_patom * 3);
return 0;
}
/* Compute the reciprocal-space dipole-dipole contribution to the
 * dynamical matrix at q_cart and store it as interleaved (real, imag)
 * pairs in dd[natom, 3, natom, 3, (real,imag)]:
 *   1. Ewald-style sum over K = G + q (get_KK), then contraction with
 *      the Born effective charges (multiply_borns);
 *   2. the precomputed q->0 limit dd_q0 is subtracted from the
 *      site-diagonal 3x3 blocks;
 *   3. the whole matrix is scaled by factor (4pi/V * unit conversion)
 *      and symmetrized to be exactly Hermitian.
 * On allocation failure dd is left untouched. */
void dym_get_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
                           const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                           PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                           const int num_G,
                           const int num_patom,
                           const double q_cart[3],
                           const double *q_direction_cart, /* must be pointer */
                           PHPYCONST double (*born)[3][3],
                           PHPYCONST double dielectric[3][3],
                           PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                           const double factor, /* 4pi/V*unit-conv */
                           const double lambda,
                           const double tolerance)
{
  int i, k, l, adrs, adrs_sum;
  double *dd_tmp;

  dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  if (dd_tmp == NULL) {
    /* Allocation failed: bail out rather than dereference NULL. */
    return;
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd[i] = 0;
    dd_tmp[i] = 0;
  }
  /* Charge-free reciprocal-space sum over K = G + q. */
  get_KK(dd_tmp,
         G_list,
         num_G,
         num_patom,
         q_cart,
         q_direction_cart,
         dielectric,
         pos,
         lambda,
         tolerance);
  /* Contract with the Born effective charge tensors. */
  multiply_borns(dd, dd_tmp, num_patom, born);
  /* Subtract the q->0 self-term from the site-diagonal blocks. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) {   /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
        adrs_sum = i * 9 + k * 3 + l;
        dd[adrs * 2] -= dd_q0[adrs_sum * 2];
        dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
      }
    }
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd[i] *= factor;
  }
  /* This may not be necessary. */
  make_Hermitian(dd, num_patom * 3);
  free(dd_tmp);
  dd_tmp = NULL;
}
/* Compute the q->0 limit of the dipole-dipole term, used later by
 * dym_get_dipole_dipole as the self-term correction.
 * The reciprocal-space sum is evaluated at q = 0 (G vectors only),
 * contracted with the Born charges, summed over the second atom index,
 * and finally symmetrized (real part symmetric, imaginary part
 * antisymmetric) over the two Cartesian indices.
 * dd_q0 has layout [natom, 3, 3, (real,imag)].
 * On allocation failure dd_q0 is left untouched. */
void dym_get_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                              PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                              const int num_G,
                              const int num_patom,
                              PHPYCONST double (*born)[3][3],
                              PHPYCONST double dielectric[3][3],
                              PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                              const double lambda,
                              const double tolerance)
{
  int i, j, k, l, adrs_tmp, adrs, adrsT;
  double zero_vec[3];
  double *dd_tmp1, *dd_tmp2;

  dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  if (dd_tmp1 == NULL || dd_tmp2 == NULL) {
    /* Allocation failed: release whatever succeeded (free(NULL) is a
     * no-op) and bail out without touching dd_q0. */
    free(dd_tmp1);
    free(dd_tmp2);
    return;
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd_tmp1[i] = 0;
    dd_tmp2[i] = 0;
  }
  zero_vec[0] = 0;
  zero_vec[1] = 0;
  zero_vec[2] = 0;
  /* Reciprocal-space sum at q = 0; NULL q_direction skips the
   * Gamma-point special case inside get_KK. */
  get_KK(dd_tmp1,
         G_list,
         num_G,
         num_patom,
         zero_vec,
         NULL,
         dielectric,
         pos,
         lambda,
         tolerance);
  multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);
  for (i = 0; i < num_patom * 18; i++) {
    dd_q0[i] = 0;
  }
  /* Sum the contracted term over the second atom index j. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) {   /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * 9 + k * 3 + l;
        for (j = 0; j < num_patom; j++) {
          adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ;
          dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
          dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
        }
      }
    }
  }
  /* Symmetrize each 3x3 block: average the real parts over (k,l) and
   * (l,k); make the imaginary parts antisymmetric. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) {   /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * 9 + k * 3 + l;
        adrsT = i * 9 + l * 3 + k;
        dd_q0[adrs * 2] += dd_q0[adrsT * 2];
        dd_q0[adrs * 2] /= 2;
        dd_q0[adrsT * 2] = dd_q0[adrs * 2];
        dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
        dd_q0[adrs * 2 + 1] /= 2;
        dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
      }
    }
  }
  free(dd_tmp1);
  dd_tmp1 = NULL;
  free(dd_tmp2);
  dd_tmp2 = NULL;
}
/* For every atom pair (i, j), compute the outer product of the
 * Born-charge-contracted wave vector, scaled by factor:
 *   charge_sum[i*num_patom + j][a][b] = (q.Z_i)_a * (q.Z_j)_b * factor
 * This is the non-analytical correction term added to the force
 * constants near q -> 0.
 * On allocation failure charge_sum is left untouched. */
void dym_get_charge_sum(double (*charge_sum)[3][3],
                        const int num_patom,
                        const double factor, /* 4pi/V*unit-conv and denominator */
                        const double q_cart[3],
                        PHPYCONST double (*born)[3][3])
{
  int i, j, k, a, b;
  double (*q_born)[3];

  q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom);
  if (q_born == NULL) {
    /* Allocation failed: bail out rather than dereference NULL. */
    return;
  }
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < 3; j++) {
      q_born[i][j] = 0;
    }
  }
  /* q_born[i] = q . Z_i (contraction with the Born tensor of atom i). */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        q_born[i][j] += q_cart[k] * born[i][k][j];
      }
    }
  }
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      for (a = 0; a < 3; a++) {
        for (b = 0; b < 3; b++) {
          charge_sum[i * num_patom + j][a][b] =
            q_born[i][a] * q_born[j][b] * factor;
        }
      }
    }
  }
  free(q_born);
  q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom, num_patom, 27, 3] */
/* shortest_vectors[num_satom, num_patom, 27, 3] */
/* multiplicities[num_satom, num_patom] */
/*Inverse transform: rebuild real-space force constants fc from the
  dynamical matrices dm sampled at the N = num_satom/num_patom
  commensurate points.  For each (primitive atom i, supercell atom j)
  pair the phase factor is averaged over the multiplicities of the
  equivalent shortest vectors, and the mass weighting removed via
  coef = sqrt(m_i * m_j') / N.  Array layouts are documented in the
  comment block above this function.*/
void dym_transform_dynmat_to_fc(double *fc,
 const double *dm,
 PHPYCONST double (*comm_points)[3],
 PHPYCONST double (*shortest_vectors)[27][3],
 const int *multiplicities,
 const double *masses,
 const int *s2pp_map,
 const int num_patom,
 const int num_satom)
{
int i, j, k, l, m, N, adrs, multi;
double coef, phase, cos_phase, sin_phase;
N = num_satom / num_patom;
for (i = 0; i < num_patom * num_satom * 9; i++) {
fc[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_satom; j++) {
coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
for (k = 0; k < N; k++) {
/*Average exp(-2*pi*i*q.r) over the equivalent shortest vectors.*/
cos_phase = 0;
sin_phase = 0;
multi = multiplicities[j * num_patom + i];
for (l = 0; l < multi; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase -= comm_points[k][m] *
 shortest_vectors[j * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI);
sin_phase += sin(phase * 2 * PI);
}
cos_phase /= multi;
sin_phase /= multi;
/*Accumulate the real part of dm * phase into fc[i][j][l][m].*/
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
 l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
fc[i * num_satom * 9 + j * 9 + l * 3 + m] +=
 (dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
}
}
}
}
}
}
/*Compute the 3x3 block (i, j) of the dynamical matrix: sum the
  phase-weighted force constants over all supercell atoms k that map to
  primitive atom j, then divide by sqrt(m_i * m_j) and store the result
  (interleaved real, imag) into dynamical_matrix.*/
static void get_dynmat_ij(double *dynamical_matrix,
 const int num_patom,
 const int num_satom,
 const double *fc,
 const double q[3],
 PHPYCONST double (*svecs)[27][3],
 const int *multi,
 const double *mass,
 const int *s2p_map,
 const int *p2s_map,
 PHPYCONST double (*charge_sum)[3][3],
 const int i,
 const int j)
{
int k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];
mass_sqrt = sqrt(mass[i] * mass[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
/*Only supercell atoms belonging to primitive atom j contribute.*/
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
 dm_imag,
 num_patom,
 num_satom,
 fc,
 q,
 svecs,
 multi,
 p2s_map,
 charge_sum,
 i,
 j,
 k);
}
/*Mass weighting of the accumulated block.*/
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
}
}
}
/*Accumulate into dm_real/dm_imag the 3x3 force-constant block between
  primitive atom i and supercell atom k, weighted by the phase factor
  exp(2*pi*i*q.r) averaged over the multi[] equivalent shortest
  vectors.  When charge_sum is non-NULL its (i, j) block is added to
  each force-constant element (non-analytical correction).*/
static void get_dm(double dm_real[3][3],
 double dm_imag[3][3],
 const int num_patom,
 const int num_satom,
 const double *fc,
 const double q[3],
 PHPYCONST double (*svecs)[27][3],
 const int *multi,
 const int *p2s_map,
 PHPYCONST double (*charge_sum)[3][3],
 const int i,
 const int j,
 const int k)
{
int l, m;
double phase, cos_phase, sin_phase, fc_elem;
cos_phase = 0;
sin_phase = 0;
/*Average the phase over all equivalent shortest vectors.*/
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * svecs[k * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
 charge_sum[i * num_patom + j][l][m]);
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
/* Return the quadratic form q^T . epsilon . q for the Cartesian wave
 * vector q_cart and dielectric tensor `dielectric`. */
static double get_dielectric_part(const double q_cart[3],
                                  PHPYCONST double dielectric[3][3])
{
  double eps_q[3]; /* epsilon . q */
  double quad;
  int row, col;

  for (row = 0; row < 3; row++) {
    eps_q[row] = 0;
    for (col = 0; col < 3; col++) {
      eps_q[row] += dielectric[row][col] * q_cart[col];
    }
  }
  quad = 0;
  for (row = 0; row < 3; row++) {
    quad += q_cart[row] * eps_q[row];
  }
  return quad;
}
/*Accumulate into dd_part the charge-free reciprocal-space dipole sum
  over K = G + q: for each K, the dielectric-screened outer product
  K K^T / (K.eps.K) * exp(-K.eps.K / (2*lambda)^2), phase-modulated by
  the atom-pair positions.  At |K| < tolerance the term is skipped
  unless q_direction_cart is given, in which case the non-analytical
  directional limit is used instead.*/
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
 PHPYCONST double (*G_list)[3], /* [num_G, 3] */
 const int num_G,
 const int num_patom,
 const double q_cart[3],
 const double *q_direction_cart,
 PHPYCONST double dielectric[3][3],
 PHPYCONST double (*pos)[3], /* [num_patom, 3] */
 const double lambda,
 const double tolerance)
{
int i, j, k, l, g, adrs;
double q_K[3];
double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
double KK[3][3];
L2 = 4 * lambda * lambda;
/* sum over K = G + q and over G (i.e. q=0) */
/* q_direction has values for summation over K at Gamma point. */
/* q_direction is NULL for summation over G */
for (g = 0; g < num_G; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_K[i] = G_list[g][i] + q_cart[i];
norm += q_K[i] * q_K[i];
}
if (sqrt(norm) < tolerance) {
if (!q_direction_cart) {
continue;
} else {
/*Gamma point: use the directional (non-analytical) limit.*/
dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] =
 q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
}
}
}
} else {
dielectric_part = get_dielectric_part(q_K, dielectric);
exp_damp = exp(-dielectric_part / L2);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
/* For D-type dynamical matrix */
/* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
/* For C-type dynamical matrix */
phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
dd_part[adrs * 2] += KK[k][l] * cos_phase;
dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
}
}
}
}
}
}
/* Symmetrize a complex square matrix in place so it is exactly
 * Hermitian: each pair (m[i][j], m[j][i]) is replaced by the average
 * of the element and the conjugate of its transpose partner.
 * mat holds num_band x num_band interleaved (real, imag) pairs. */
static void make_Hermitian(double *mat, const int num_band)
{
  int row, col, ij, ji;

  for (row = 0; row < num_band; row++) {
    for (col = row; col < num_band; col++) {
      ij = 2 * (row * num_band + col);
      ji = 2 * (col * num_band + row);
      /* Real part: average of the symmetric pair. */
      mat[ij] += mat[ji];
      mat[ij] /= 2;
      /* Imaginary part: half the antisymmetric difference. */
      mat[ij + 1] -= mat[ji + 1];
      mat[ij + 1] /= 2;
      /* Mirror the conjugate into the transposed element. */
      mat[ji] = mat[ij];
      mat[ji + 1] = -mat[ij + 1];
    }
  }
}
/*Contract the charge-free dipole sum dd_in with the Born effective
  charge tensors:
    dd[i,k,j,l] += sum_{m,n} Z_i[m][k] * Z_j[n][l] * dd_in[i,m,j,n]
  applied to both the real and imaginary parts (interleaved layout).*/
static void multiply_borns(double *dd,
 const double *dd_in,
 const int num_patom,
 PHPYCONST double (*born)[3][3])
{
int i, j, k, l, m, n, adrs, adrs_in;
double zz;
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
for (m = 0; m < 3; m++) { /* alpha' */
for (n = 0; n < 3; n++) { /* beta' */
adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ;
zz = born[i][m][k] * born[j][n][l];
dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
}
}
}
}
}
}
}
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 7x7 convolution, stride 1.
// For each output channel p the output is seeded with bias[p], then the
// 7x7 correlation of every input channel q is accumulated on top, using
// the kernel slice kernel + p*inch*49 + q*49.
// aarch64 uses NEON intrinsics (4 outputs per iteration), armv7 uses
// inline assembly (4 outputs per iteration); both fall back to a scalar
// loop for the remaining columns of each row.
// Cleanup vs. the original: removed unused locals h, outptr2 and r7
// (this stride-1 path processes one output row at a time).
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*49 + q*49;

            // seven consecutive input rows feeding one output row
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            const float* r6 = img0 + w*6;

            // seven kernel rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 7;
            const float* k2 = kernel0 + 14;
            const float* k3 = kernel0 + 21;
            const float* k4 = kernel0 + 28;
            const float* k5 = kernel0 + 35;
            const float* k6 = kernel0 + 42;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    float32x4_t _k0123 = vld1q_f32(k0);
                    float32x4_t _k4567 = vld1q_f32(k0 + 4);

                    float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
                    float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
                    float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
                    float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
                    float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);

                    float32x4_t _k78910 = vld1q_f32(k1);
                    float32x4_t _k11121314 = vld1q_f32(k1 + 4);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r10n = vld1q_f32(r1 + 8);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
                    float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);

                    float32x4_t _k14151617 = vld1q_f32(k2);
                    float32x4_t _k18192021 = vld1q_f32(k2 + 4);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r20n = vld1q_f32(r2 + 8);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
                    float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);

                    float32x4_t _k21222324 = vld1q_f32(k3);
                    float32x4_t _k25262728 = vld1q_f32(k3 + 4);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r30n = vld1q_f32(r3 + 8);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
                    float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);

                    float32x4_t _k28293031 = vld1q_f32(k4);
                    float32x4_t _k32333435 = vld1q_f32(k4 + 4);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r40n = vld1q_f32(r4 + 8);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
                    float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);

                    float32x4_t _k35363738 = vld1q_f32(k5);
                    float32x4_t _k39404142 = vld1q_f32(k5 + 4);

                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r50n = vld1q_f32(r5 + 8);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
                    float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
                    float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);

                    float32x4_t _k42434445 = vld1q_f32(k6);
                    float32x4_t _k46474849 = vld1q_f32(k6 + 4);

                    float32x4_t _r60 = vld1q_f32(r6);
                    float32x4_t _r64 = vld1q_f32(r6 + 4);
                    float32x4_t _r60n = vld1q_f32(r6 + 8);
                    float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
                    float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
                    float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
                    float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
                    float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);

                    vst1q_f32(outptr, _sum);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    r6 += 4;
                    outptr += 4;
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "0:                             \n"
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d24-d25}, [%1]     \n"// _sum
                    "veor       q13, q13            \n"// _sum2 = 0;
                    "veor       q14, q14            \n"// _sum3 = 0;
                    "veor       q15, q15            \n"// _sum4 = 0;
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k0123 k4567
                    "add        %9, #28             \n"
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d0-d1}, [%2]!      \n"// q0 = 0 1 2 3
                    "vmla.f32   q12, q0, d8[0]      \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d4-d7}, [%2]       \n"// q2 = 4 5 6 7  q3 = 8 9 10 11
                    "vmla.f32   q13, q2, d10[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"// q1 = 1 2 3 4
                    "vext.32    q10, q2, q3, #1     \n"// q10= 5 6 7 8
                    "vmla.f32   q14, q1, d8[1]      \n"
                    "vmla.f32   q15, q10, d10[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"// q8 = 2 3 4 5
                    "vext.32    q11, q2, q3, #2     \n"// q11= 6 7 8 9
                    "vmla.f32   q12, q8, d9[0]      \n"
                    "vmla.f32   q13, q11, d11[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"// q9 = 3 4 5 6
                    "vmla.f32   q14, q9, d9[1]      \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k78910 k11121314
                    "add        %9, #28             \n"
                    "pld        [%3, #128]          \n"
                    "vld1.f32   {d0-d1}, [%3]!      \n"
                    "vmla.f32   q15, q0, d12[0]     \n"
                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d4-d7}, [%3]       \n"
                    "vmla.f32   q12, q2, d14[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q13, q1, d12[1]     \n"
                    "vmla.f32   q14, q10, d14[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q15, q8, d13[0]     \n"
                    "vmla.f32   q12, q11, d15[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q13, q9, d13[1]     \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k14151617 k18192021
                    "add        %9, #28             \n"
                    "pld        [%4, #128]          \n"
                    "vld1.f32   {d0-d1}, [%4]!      \n"
                    "vmla.f32   q14, q0, d8[0]      \n"
                    "pld        [%4, #256]          \n"
                    "vld1.f32   {d4-d7}, [%4]       \n"
                    "vmla.f32   q15, q2, d10[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q12, q1, d8[1]      \n"
                    "vmla.f32   q13, q10, d10[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q14, q8, d9[0]      \n"
                    "vmla.f32   q15, q11, d11[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q12, q9, d9[1]      \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k21222324 k25262728
                    "add        %9, #28             \n"
                    "pld        [%5, #128]          \n"
                    "vld1.f32   {d0-d1}, [%5]!      \n"
                    "vmla.f32   q13, q0, d12[0]     \n"
                    "pld        [%5, #256]          \n"
                    "vld1.f32   {d4-d7}, [%5]       \n"
                    "vmla.f32   q14, q2, d14[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q15, q1, d12[1]     \n"
                    "vmla.f32   q12, q10, d14[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q13, q8, d13[0]     \n"
                    "vmla.f32   q14, q11, d15[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q15, q9, d13[1]     \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k28293031 k32333435
                    "add        %9, #28             \n"
                    "pld        [%6, #128]          \n"
                    "vld1.f32   {d0-d1}, [%6]!      \n"
                    "vmla.f32   q12, q0, d8[0]      \n"
                    "pld        [%6, #256]          \n"
                    "vld1.f32   {d4-d7}, [%6]       \n"
                    "vmla.f32   q13, q2, d10[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q14, q1, d8[1]      \n"
                    "vmla.f32   q15, q10, d10[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q12, q8, d9[0]      \n"
                    "vmla.f32   q13, q11, d11[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q14, q9, d9[1]      \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k35363738 k39404142
                    "add        %9, #28             \n"
                    "pld        [%7, #128]          \n"
                    "vld1.f32   {d0-d1}, [%7]!      \n"
                    "vmla.f32   q15, q0, d12[0]     \n"
                    "pld        [%7, #256]          \n"
                    "vld1.f32   {d4-d7}, [%7]       \n"
                    "vmla.f32   q12, q2, d14[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q13, q1, d12[1]     \n"
                    "vmla.f32   q14, q10, d14[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q15, q8, d13[0]     \n"
                    "vmla.f32   q12, q11, d15[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q13, q9, d13[1]     \n"
                    "pld        [%9, #256]          \n"
                    "vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k42434445 k46474849
                    "sub        %9, #168            \n"// restore k0
                    "pld        [%8, #128]          \n"
                    "vld1.f32   {d0-d1}, [%8]!      \n"
                    "vmla.f32   q14, q0, d8[0]      \n"
                    "pld        [%8, #256]          \n"
                    "vld1.f32   {d4-d7}, [%8]       \n"
                    "vmla.f32   q15, q2, d10[0]     \n"
                    "vext.32    q1, q0, q2, #1      \n"
                    "vext.32    q10, q2, q3, #1     \n"
                    "vmla.f32   q12, q1, d8[1]      \n"
                    "vmla.f32   q13, q10, d10[1]    \n"
                    "vext.32    q8, q0, q2, #2      \n"
                    "vext.32    q11, q2, q3, #2     \n"
                    "vmla.f32   q14, q8, d9[0]      \n"
                    "vmla.f32   q15, q11, d11[0]    \n"
                    "vext.32    q9, q0, q2, #3      \n"
                    "vmla.f32   q12, q9, d9[1]      \n"
                    "vadd.f32   q13, q13, q14       \n"
                    "vadd.f32   q13, q13, q15       \n"
                    "vadd.f32   q12, q12, q13       \n"
                    "vst1.f32   {d24-d25}, [%1]!    \n"
                    "subs       %0, #1              \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3),     // %5
                      "=r"(r4),     // %6
                      "=r"(r5),     // %7
                      "=r"(r6),     // %8
                      "=r"(k0)      // %9
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "7"(r5),
                      "8"(r6),
                      "9"(k0)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output per iteration
                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r0[5] * k0[5];
                    sum += r0[6] * k0[6];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r1[5] * k1[5];
                    sum += r1[6] * k1[6];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r2[5] * k2[5];
                    sum += r2[6] * k2[6];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r3[5] * k3[5];
                    sum += r3[6] * k3[6];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    sum += r4[5] * k4[5];
                    sum += r4[6] * k4[6];

                    sum += r5[0] * k5[0];
                    sum += r5[1] * k5[1];
                    sum += r5[2] * k5[2];
                    sum += r5[3] * k5[3];
                    sum += r5[4] * k5[4];
                    sum += r5[5] * k5[5];
                    sum += r5[6] * k5[6];

                    sum += r6[0] * k6[0];
                    sum += r6[1] * k6[1];
                    sum += r6[2] * k6[2];
                    sum += r6[3] * k6[3];
                    sum += r6[4] * k6[4];
                    sum += r6[5] * k6[5];
                    sum += r6[6] * k6[6];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    r6++;
                    outptr++;
                }

                // skip the 6-column right border of the input row
                r0 += 6;
                r1 += 6;
                r2 += 6;
                r3 += 6;
                r4 += 6;
                r5 += 6;
                r6 += 6;
            }
        }
    }
}
// 7x7 convolution, stride 2, NEON-optimized.
// Accumulates q-channel contributions into top_blob; each output channel is
// first filled with its bias, then every input channel's 7x7 response is added.
// Assumes bottom_blob has enough valid columns for the vector loads
// (vld2q_f32(rX + 8) reads up to 16 floats per row per iteration).
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After one output row, the input pointers have advanced by 2*outw
    // (stride 2); skip the remainder of that row plus one full row so the
    // window moves down by 2 input rows.
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*49 + q*49;

            // Seven consecutive input rows feeding one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            const float* r6 = img0 + w*6;

            // One kernel row (7 taps) per pointer.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 7;
            const float* k2 = kernel0 + 14;
            const float* k3 = kernel0 + 21;
            const float* k4 = kernel0 + 28;
            const float* k5 = kernel0 + 35;
            const float* k6 = kernel0 + 42;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // 4 outputs per vector iteration
                int remain = outw - (nn << 2);   // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    float32x4_t _k0123 = vld1q_f32(k0);
                    float32x4_t _k4567 = vld1q_f32(k0 + 4);

                    // vld2q de-interleaves even/odd columns (stride-2 access).
                    float32x4x2_t _r00_02461357 = vld2q_f32(r0);
                    float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
                    float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
                    float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
                    float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
                    float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
                    float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
                    float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
                    float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
                    float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
                    float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);

                    float32x4_t _k78910 = vld1q_f32(k1);
                    float32x4_t _k11121314 = vld1q_f32(k1 + 4);

                    float32x4x2_t _r10_02461357 = vld2q_f32(r1);
                    float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
                    float32x4_t _r1_8101214 = _r10nx2.val[0];
                    float32x4_t _r1_9111315 = _r10nx2.val[1];
                    float32x4_t _r10 = _r10_02461357.val[0];
                    float32x4_t _r11 = _r10_02461357.val[1];
                    float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
                    float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
                    float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
                    float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
                    float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);

                    float32x4_t _k14151617 = vld1q_f32(k2);
                    float32x4_t _k18192021 = vld1q_f32(k2 + 4);

                    float32x4x2_t _r20_02461357 = vld2q_f32(r2);
                    float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
                    float32x4_t _r2_8101214 = _r20nx2.val[0];
                    float32x4_t _r2_9111315 = _r20nx2.val[1];
                    float32x4_t _r20 = _r20_02461357.val[0];
                    float32x4_t _r21 = _r20_02461357.val[1];
                    float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
                    float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
                    float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
                    float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
                    float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);

                    float32x4_t _k21222324 = vld1q_f32(k3);
                    float32x4_t _k25262728 = vld1q_f32(k3 + 4);

                    float32x4x2_t _r30_02461357 = vld2q_f32(r3);
                    float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
                    float32x4_t _r3_8101214 = _r30nx2.val[0];
                    float32x4_t _r3_9111315 = _r30nx2.val[1];
                    float32x4_t _r30 = _r30_02461357.val[0];
                    float32x4_t _r31 = _r30_02461357.val[1];
                    float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
                    float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
                    float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
                    float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
                    float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);

                    float32x4_t _k28293031 = vld1q_f32(k4);
                    float32x4_t _k32333435 = vld1q_f32(k4 + 4);

                    float32x4x2_t _r40_02461357 = vld2q_f32(r4);
                    float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
                    float32x4_t _r4_8101214 = _r40nx2.val[0];
                    float32x4_t _r4_9111315 = _r40nx2.val[1];
                    float32x4_t _r40 = _r40_02461357.val[0];
                    float32x4_t _r41 = _r40_02461357.val[1];
                    float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
                    float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
                    float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
                    float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
                    float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);

                    float32x4_t _k35363738 = vld1q_f32(k5);
                    float32x4_t _k39404142 = vld1q_f32(k5 + 4);

                    float32x4x2_t _r50_02461357 = vld2q_f32(r5);
                    float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
                    float32x4_t _r5_8101214 = _r50nx2.val[0];
                    float32x4_t _r5_9111315 = _r50nx2.val[1];
                    float32x4_t _r50 = _r50_02461357.val[0];
                    float32x4_t _r51 = _r50_02461357.val[1];
                    float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
                    float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
                    float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
                    float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
                    float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);

                    float32x4_t _k42434445 = vld1q_f32(k6);
                    float32x4_t _k46474849 = vld1q_f32(k6 + 4);

                    float32x4x2_t _r60_02461357 = vld2q_f32(r6);
                    float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
                    float32x4_t _r6_8101214 = _r60nx2.val[0];
                    float32x4_t _r6_9111315 = _r60nx2.val[1];
                    float32x4_t _r60 = _r60_02461357.val[0];
                    float32x4_t _r61 = _r60_02461357.val[1];
                    float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
                    float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
                    float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
                    float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
                    float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);

                    vst1q_f32(outptr, _sum);

                    // 4 outputs consumed 8 input columns (stride 2).
                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    r3 += 8;
                    r4 += 8;
                    r5 += 8;
                    r6 += 8;
                    outptr += 4;
                }
#else
                if (nn > 0)
                {
                    // ARMv7 path: four parallel accumulators (q13..q15 + q13's
                    // preload) are summed at the end; %9 walks the 49-tap
                    // kernel in 28-byte (7-float) steps and is restored by
                    // "sub %9, #168" before the loop repeats.
                    asm volatile(
                        "0: \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d26-d27}, [%1] \n"// _sum
                        "veor q14, q14 \n"// _sum2 = 0;
                        "veor q15, q15 \n"// _sum3 = 0;
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
                        "add %9, #28 \n"
                        "pld [%2, #512] \n"
                        "vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
                        "vmla.f32 q13, q0, d8[0] \n"
                        "vmla.f32 q14, q1, d8[1] \n"
                        "vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
                        "vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
                        "vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
                        "vmla.f32 q15, q8, d9[0] \n"
                        "vmla.f32 q13, q9, d9[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
                        "vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
                        "vmla.f32 q14, q10, d10[0] \n"
                        "vmla.f32 q15, q11, d10[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
                        "vmla.f32 q13, q12, d11[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
                        "add %9, #28 \n"
                        "pld [%3, #512] \n"
                        "vld2.f32 {d0-d3}, [%3]! \n"
                        "vmla.f32 q14, q0, d12[0] \n"
                        "vmla.f32 q15, q1, d12[1] \n"
                        "vld2.f32 {d4-d7}, [%3] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q13, q8, d13[0] \n"
                        "vmla.f32 q14, q9, d13[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q15, q10, d14[0] \n"
                        "vmla.f32 q13, q11, d14[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q14, q12, d15[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
                        "add %9, #28 \n"
                        "pld [%4, #512] \n"
                        "vld2.f32 {d0-d3}, [%4]! \n"
                        "vmla.f32 q15, q0, d8[0] \n"
                        "vmla.f32 q13, q1, d8[1] \n"
                        "vld2.f32 {d4-d7}, [%4] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q14, q8, d9[0] \n"
                        "vmla.f32 q15, q9, d9[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q13, q10, d10[0] \n"
                        "vmla.f32 q14, q11, d10[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q15, q12, d11[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
                        "add %9, #28 \n"
                        "pld [%5, #512] \n"
                        "vld2.f32 {d0-d3}, [%5]! \n"
                        "vmla.f32 q13, q0, d12[0] \n"
                        "vmla.f32 q14, q1, d12[1] \n"
                        "vld2.f32 {d4-d7}, [%5] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q15, q8, d13[0] \n"
                        "vmla.f32 q13, q9, d13[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q14, q10, d14[0] \n"
                        "vmla.f32 q15, q11, d14[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q13, q12, d15[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
                        "add %9, #28 \n"
                        "pld [%6, #512] \n"
                        "vld2.f32 {d0-d3}, [%6]! \n"
                        "vmla.f32 q14, q0, d8[0] \n"
                        "vmla.f32 q15, q1, d8[1] \n"
                        "vld2.f32 {d4-d7}, [%6] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q13, q8, d9[0] \n"
                        "vmla.f32 q14, q9, d9[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q15, q10, d10[0] \n"
                        "vmla.f32 q13, q11, d10[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q14, q12, d11[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
                        "add %9, #28 \n"
                        "pld [%7, #512] \n"
                        "vld2.f32 {d0-d3}, [%7]! \n"
                        "vmla.f32 q15, q0, d12[0] \n"
                        "vmla.f32 q13, q1, d12[1] \n"
                        "vld2.f32 {d4-d7}, [%7] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q14, q8, d13[0] \n"
                        "vmla.f32 q15, q9, d13[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q13, q10, d14[0] \n"
                        "vmla.f32 q14, q11, d14[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q15, q12, d15[0] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
                        "sub %9, #168 \n"// restore k0
                        "pld [%8, #512] \n"
                        "vld2.f32 {d0-d3}, [%8]! \n"
                        "vmla.f32 q13, q0, d8[0] \n"
                        "vmla.f32 q14, q1, d8[1] \n"
                        "vld2.f32 {d4-d7}, [%8] \n"
                        "vext.32 q8, q0, q2, #1 \n"
                        "vext.32 q9, q1, q3, #1 \n"
                        "vmla.f32 q15, q8, d9[0] \n"
                        "vmla.f32 q13, q9, d9[1] \n"
                        "vext.32 q10, q0, q2, #2 \n"
                        "vext.32 q11, q1, q3, #2 \n"
                        "vmla.f32 q14, q10, d10[0] \n"
                        "vmla.f32 q15, q11, d10[1] \n"
                        "vext.32 q12, q0, q2, #3 \n"
                        "vmla.f32 q13, q12, d11[0] \n"
                        "vadd.f32 q14, q14, q15 \n"
                        "vadd.f32 q13, q13, q14 \n"
                        "vst1.f32 {d26-d27}, [%1]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4), // %6
                        "=r"(r5), // %7
                        "=r"(r6), // %8
                        "=r"(k0) // %9
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "7"(r5),
                        "8"(r6),
                        "9"(k0)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output at a time, full 7x7 dot product.
                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r0[5] * k0[5];
                    sum += r0[6] * k0[6];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r1[5] * k1[5];
                    sum += r1[6] * k1[6];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r2[5] * k2[5];
                    sum += r2[6] * k2[6];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r3[5] * k3[5];
                    sum += r3[6] * k3[6];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    sum += r4[5] * k4[5];
                    sum += r4[6] * k4[6];

                    sum += r5[0] * k5[0];
                    sum += r5[1] * k5[1];
                    sum += r5[2] * k5[2];
                    sum += r5[3] * k5[3];
                    sum += r5[4] * k5[4];
                    sum += r5[5] * k5[5];
                    sum += r5[6] * k5[6];

                    sum += r6[0] * k6[0];
                    sum += r6[1] * k6[1];
                    sum += r6[2] * k6[2];
                    sum += r6[3] * k6[3];
                    sum += r6[4] * k6[4];
                    sum += r6[5] * k6[5];
                    sum += r6[6] * k6[6];

                    *outptr += sum;

                    // stride 2
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    r5 += 2;
                    r6 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
                r5 += tailstep;
                r6 += tailstep;
            }
        }
    }
}
#if NCNN_CNNCACHE
// 7x7 convolution, stride 1, NEON-optimized, with a per-output cache mask.
// Unlike conv7x7s1_neon, the output is NOT bias-filled here (the caller is
// expected to have populated top_blob, e.g. from a cache); bias is ignored.
// NOTE(review): the vectorized NEON/asm paths accumulate into ALL outputs,
// while only the scalar remainder loop consults cached_map — confirm this is
// the intended cache semantics (mask honored only for the scalar tail).
static void conv7x7s1_neon_cached(
    const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, bool* cached_map)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    // const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // const float bias0 = bias ? bias[p] : 0.f;
        // out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*49 + q*49;

            // Seven consecutive input rows feeding one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            const float* r6 = img0 + w*6;

            // One kernel row (7 taps) per pointer.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 7;
            const float* k2 = kernel0 + 14;
            const float* k3 = kernel0 + 21;
            const float* k4 = kernel0 + 28;
            const float* k5 = kernel0 + 35;
            const float* k6 = kernel0 + 42;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // 4 outputs per vector iteration
                int remain = outw - (nn << 2);   // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    float32x4_t _k0123 = vld1q_f32(k0);
                    float32x4_t _k4567 = vld1q_f32(k0 + 4);

                    float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
                    float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
                    float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
                    float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
                    float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);

                    float32x4_t _k78910 = vld1q_f32(k1);
                    float32x4_t _k11121314 = vld1q_f32(k1 + 4);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r10n = vld1q_f32(r1 + 8);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
                    float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);

                    float32x4_t _k14151617 = vld1q_f32(k2);
                    float32x4_t _k18192021 = vld1q_f32(k2 + 4);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r20n = vld1q_f32(r2 + 8);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
                    float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);

                    float32x4_t _k21222324 = vld1q_f32(k3);
                    float32x4_t _k25262728 = vld1q_f32(k3 + 4);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r30n = vld1q_f32(r3 + 8);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
                    float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);

                    float32x4_t _k28293031 = vld1q_f32(k4);
                    float32x4_t _k32333435 = vld1q_f32(k4 + 4);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r40n = vld1q_f32(r4 + 8);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
                    float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);

                    float32x4_t _k35363738 = vld1q_f32(k5);
                    float32x4_t _k39404142 = vld1q_f32(k5 + 4);

                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r50n = vld1q_f32(r5 + 8);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
                    float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
                    float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);

                    float32x4_t _k42434445 = vld1q_f32(k6);
                    float32x4_t _k46474849 = vld1q_f32(k6 + 4);

                    float32x4_t _r60 = vld1q_f32(r6);
                    float32x4_t _r64 = vld1q_f32(r6 + 4);
                    float32x4_t _r60n = vld1q_f32(r6 + 8);
                    float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
                    float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
                    float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
                    float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
                    float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);

                    vst1q_f32(outptr, _sum);

                    // stride 1: 4 outputs consumed 4 input columns
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    r6 += 4;
                    outptr += 4;
                }
#else
                if (nn > 0)
                {
                    // ARMv7 path: four accumulators q12..q15, summed at the
                    // end; %9 walks the 49-tap kernel in 28-byte steps and is
                    // restored via "sub %9, #168".
                    asm volatile(
                        "0: \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d24-d25}, [%1] \n"// _sum
                        "veor q13, q13 \n"// _sum2 = 0;
                        "veor q14, q14 \n"// _sum3 = 0;
                        "veor q15, q15 \n"// _sum4 = 0;
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
                        "add %9, #28 \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
                        "vmla.f32 q12, q0, d8[0] \n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
                        "vmla.f32 q13, q2, d10[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
                        "vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
                        "vmla.f32 q14, q1, d8[1] \n"
                        "vmla.f32 q15, q10, d10[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
                        "vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
                        "vmla.f32 q12, q8, d9[0] \n"
                        "vmla.f32 q13, q11, d11[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
                        "vmla.f32 q14, q9, d9[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
                        "add %9, #28 \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d0-d1}, [%3]! \n"
                        "vmla.f32 q15, q0, d12[0] \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d4-d7}, [%3] \n"
                        "vmla.f32 q12, q2, d14[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q13, q1, d12[1] \n"
                        "vmla.f32 q14, q10, d14[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q15, q8, d13[0] \n"
                        "vmla.f32 q12, q11, d15[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q13, q9, d13[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
                        "add %9, #28 \n"
                        "pld [%4, #128] \n"
                        "vld1.f32 {d0-d1}, [%4]! \n"
                        "vmla.f32 q14, q0, d8[0] \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d4-d7}, [%4] \n"
                        "vmla.f32 q15, q2, d10[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q12, q1, d8[1] \n"
                        "vmla.f32 q13, q10, d10[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q14, q8, d9[0] \n"
                        "vmla.f32 q15, q11, d11[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q12, q9, d9[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
                        "add %9, #28 \n"
                        "pld [%5, #128] \n"
                        "vld1.f32 {d0-d1}, [%5]! \n"
                        "vmla.f32 q13, q0, d12[0] \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5] \n"
                        "vmla.f32 q14, q2, d14[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q15, q1, d12[1] \n"
                        "vmla.f32 q12, q10, d14[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q13, q8, d13[0] \n"
                        "vmla.f32 q14, q11, d15[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q15, q9, d13[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
                        "add %9, #28 \n"
                        "pld [%6, #128] \n"
                        "vld1.f32 {d0-d1}, [%6]! \n"
                        "vmla.f32 q12, q0, d8[0] \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d4-d7}, [%6] \n"
                        "vmla.f32 q13, q2, d10[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q14, q1, d8[1] \n"
                        "vmla.f32 q15, q10, d10[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q12, q8, d9[0] \n"
                        "vmla.f32 q13, q11, d11[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q14, q9, d9[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
                        "add %9, #28 \n"
                        "pld [%7, #128] \n"
                        "vld1.f32 {d0-d1}, [%7]! \n"
                        "vmla.f32 q15, q0, d12[0] \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7] \n"
                        "vmla.f32 q12, q2, d14[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q13, q1, d12[1] \n"
                        "vmla.f32 q14, q10, d14[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q15, q8, d13[0] \n"
                        "vmla.f32 q12, q11, d15[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q13, q9, d13[1] \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
                        "sub %9, #168 \n"// restore k0
                        "pld [%8, #128] \n"
                        "vld1.f32 {d0-d1}, [%8]! \n"
                        "vmla.f32 q14, q0, d8[0] \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d4-d7}, [%8] \n"
                        "vmla.f32 q15, q2, d10[0] \n"
                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"
                        "vmla.f32 q12, q1, d8[1] \n"
                        "vmla.f32 q13, q10, d10[1] \n"
                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"
                        "vmla.f32 q14, q8, d9[0] \n"
                        "vmla.f32 q15, q11, d11[0] \n"
                        "vext.32 q9, q0, q2, #3 \n"
                        "vmla.f32 q12, q9, d9[1] \n"
                        "vadd.f32 q13, q13, q14 \n"
                        "vadd.f32 q13, q13, q15 \n"
                        "vadd.f32 q12, q12, q13 \n"
                        "vst1.f32 {d24-d25}, [%1]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4), // %6
                        "=r"(r5), // %7
                        "=r"(r6), // %8
                        "=r"(k0) // %9
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "7"(r5),
                        "8"(r6),
                        "9"(k0)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: compute only outputs flagged in cached_map
                // (indexed by output offset within this channel).
                for (; remain>0; remain--)
                {
                    if (cached_map[outptr - out]) {
                        float sum = 0;

                        sum += r0[0] * k0[0];
                        sum += r0[1] * k0[1];
                        sum += r0[2] * k0[2];
                        sum += r0[3] * k0[3];
                        sum += r0[4] * k0[4];
                        sum += r0[5] * k0[5];
                        sum += r0[6] * k0[6];

                        sum += r1[0] * k1[0];
                        sum += r1[1] * k1[1];
                        sum += r1[2] * k1[2];
                        sum += r1[3] * k1[3];
                        sum += r1[4] * k1[4];
                        sum += r1[5] * k1[5];
                        sum += r1[6] * k1[6];

                        sum += r2[0] * k2[0];
                        sum += r2[1] * k2[1];
                        sum += r2[2] * k2[2];
                        sum += r2[3] * k2[3];
                        sum += r2[4] * k2[4];
                        sum += r2[5] * k2[5];
                        sum += r2[6] * k2[6];

                        sum += r3[0] * k3[0];
                        sum += r3[1] * k3[1];
                        sum += r3[2] * k3[2];
                        sum += r3[3] * k3[3];
                        sum += r3[4] * k3[4];
                        sum += r3[5] * k3[5];
                        sum += r3[6] * k3[6];

                        sum += r4[0] * k4[0];
                        sum += r4[1] * k4[1];
                        sum += r4[2] * k4[2];
                        sum += r4[3] * k4[3];
                        sum += r4[4] * k4[4];
                        sum += r4[5] * k4[5];
                        sum += r4[6] * k4[6];

                        sum += r5[0] * k5[0];
                        sum += r5[1] * k5[1];
                        sum += r5[2] * k5[2];
                        sum += r5[3] * k5[3];
                        sum += r5[4] * k5[4];
                        sum += r5[5] * k5[5];
                        sum += r5[6] * k5[6];

                        sum += r6[0] * k6[0];
                        sum += r6[1] * k6[1];
                        sum += r6[2] * k6[2];
                        sum += r6[3] * k6[3];
                        sum += r6[4] * k6[4];
                        sum += r6[5] * k6[5];
                        sum += r6[6] * k6[6];

                        *outptr += sum;
                    }

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    r6++;
                    outptr++;
                }

                // Advance to the next input row: outw increments already
                // happened; +6 skips the 7-wide window overlap (w = outw + 6).
                r0 += 6;
                r1 += 6;
                r2 += 6;
                r3 += 6;
                r4 += 6;
                r5 += 6;
                r6 += 6;
            }
        }
    }
}
// 7x7 convolution, stride 2, ARM-NEON optimized, with a per-pixel recompute
// mask (`cached_map`): in the scalar path an output pixel is accumulated only
// when cached_map[outptr - out] is set — presumably marking pixels whose
// cached value is invalid; TODO confirm the flag's polarity against callers.
// NOTE(review): the NEON intrinsics and inline-asm fast paths below do NOT
// consult cached_map and always recompute 4 pixels per iteration — confirm
// this is intended.
static void conv7x7s2_neon_cached(
const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, bool* cached_map)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Stride 2: a row of outputs consumes 2*outw input columns; tailstep skips
// the remainder of that input row plus one entire row.
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
// const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
// out.fill(bias0);
// Accumulate the contribution of every input channel into output channel p.
for (int q=0; q<inch; q++)
{
float* outptr = out;
// NOTE(review): outptr2 is declared but never used in this function.
float* outptr2 = out + outw;
const float* img0 = bottom_blob.channel(q);
// Kernel layout: outch x inch x 49 floats; k0..k6 are the 7 kernel rows.
const float* kernel0 = kernel + p*inch*49 + q*49;
// r0..r6 are the 7 input rows contributing to one output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// Vector path handles 4 outputs per iteration; `remain` is the scalar tail.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsics path: vld2q de-interleaves even/odd input columns
// (stride 2), vextq builds the shifted taps, vfmaq accumulates into the
// existing output values (read-modify-write, matching `*outptr += sum`).
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
// 4 outputs at stride 2 consume 8 input columns.
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#else
// armv7 inline-asm path. Register roles: q13/q14/q15 accumulate three
// partial sums that are added together at the end; q4-q7 hold kernel rows;
// k0 (%9) is advanced by 28 bytes per kernel row and restored at the end
// (7 rows * 28 = 196, minus the final row's 28 already not added = 168).
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d26-d27}, [%1] \n"// _sum
"veor q14, q14 \n"// _sum2 = 0;
"veor q15, q15 \n"// _sum3 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #512] \n"
"vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
"vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
"vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
"vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
"vmla.f32 q13, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #512] \n"
"vld2.f32 {d0-d3}, [%3]! \n"
"vmla.f32 q14, q0, d12[0] \n"
"vmla.f32 q15, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%3] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q13, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #512] \n"
"vld2.f32 {d0-d3}, [%4]! \n"
"vmla.f32 q15, q0, d8[0] \n"
"vmla.f32 q13, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%4] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #512] \n"
"vld2.f32 {d0-d3}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"vmla.f32 q14, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%5] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #512] \n"
"vld2.f32 {d0-d3}, [%6]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"vmla.f32 q15, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%6] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #512] \n"
"vld2.f32 {d0-d3}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"vmla.f32 q13, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%7] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d14[0] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #512] \n"
"vld2.f32 {d0-d3}, [%8]! \n"
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%8] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d11[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q13, q13, q14 \n"
"vst1.f32 {d26-d27}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: accumulate the 7x7 dot product only for pixels flagged in
// cached_map (indexed by the pixel's linear offset within this channel).
for (; remain>0; remain--)
{
if (cached_map[outptr - out]) {
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
}
// Advance input by the stride even when the pixel was skipped.
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
// Move the 7 input row pointers down to the next output row (stride 2).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
#endif // NCNN_CNNCACHE |
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <dmlc/thread_local.h>
#include <nnvm/graph.h>
#include <nnvm/graph_attr_types.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/graph_attr_types.h>
#include <mxnet/imperative.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/storage.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
/*! \brief Return the id of the calling process (platform-independent wrapper). */
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
  // Kernel mapped over indptr positions 0..end-1; writes kCSRIndPtrErr to *out
  // when any invariant fails for position i: indptr[i+1] non-negative and
  // non-decreasing, indptr[0] == 0, and indptr[end] equal to the number of
  // stored indices.
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
const nnvm::dim_t end, const nnvm::dim_t idx_size) {
if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
(i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
  // Kernel mapped over rows; writes kCSRIdxErr to *out when a column index in
  // row i is negative, >= ncols, or indices within the row are not strictly
  // ascending. Stops scanning the row at the first violation.
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const RType* indptr, const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i+1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 ||
(j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
  // Kernel mapped over stored row indices; writes kRSPIdxErr to *out when
  // idx[i] is out of [0, nrows) or the index list is not strictly ascending.
  // `end` is the last valid position, so idx[i+1] is only read when i < end.
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const nnvm::dim_t end, const nnvm::dim_t nrows) {
if ((i < end && idx[i+1] <= idx[i])
|| idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage)
<< "CheckFormatCSRImpl is for CSRNDArray";
const mxnet::TShape shape = input.shape();
const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
const mxnet::TShape storage_shape = input.storage_shape();
// O(1) structural checks: CSR must be 2-D with 1-D aux/data arrays,
// indptr of length rows+1, and one column index per stored value.
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) ||
(idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
// O(N) content checks run on the array's own context: the error flag is
// computed into a 1-element scratch NDArray there, then copied to err_cpu.
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
// Initialize the flag to "no error" before the checking kernels run.
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1, idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
// O(1) check: one stored row index per stored row of data.
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
// An empty row-sparse array is trivially valid.
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
// O(N) index check on the array's own context; result copied back to CPU.
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
/*!
 * \brief Dispatch a sparse-format validity check to the implementation that
 *        matches the input's storage type. Dense arrays are accepted as-is;
 *        an unrecognized storage type is fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // Dense storage has no auxiliary index arrays to validate.
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& s : vstorage) {
    if (s != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (ndarrays.empty()) {
    return false;
  }
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if storage type of any array in `ndarrays`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  // Iterating an empty vector naturally yields false.
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() == stype) {
      return true;
    }
  }
  return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  // std::find yields end() for empty input, so no explicit empty check needed.
  return std::find(ndstypes.begin(), ndstypes.end(), stype) != ndstypes.end();
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  // No `default:` case on purpose: compilers can then warn when a new
  // DispatchMode value is added without a matching string here.
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
  // Fallback for out-of-range values; also guarantees a return on all paths.
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
  // Fallback for unrecognized storage-type codes.
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
  // Fallback for unrecognized device-type codes.
return "unknown";
}
/*!
 * \brief Look up `attr_name` in the node's attribute dictionary,
 *        returning `default_val` when the key is absent.
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  // Single map lookup instead of a contains-then-fetch pair.
  const auto it = attrs.dict.find(attr_name);
  return (it == attrs.dict.end()) ? default_val : it->second;
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
  // Builds a multi-line human-readable summary used in fallback/log messages.
std::ostringstream os;
os << "operator = " << attrs.op->name
<< "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
/*!
 * \brief Get a string representation of the operator together with the
 *        storage types of its input and output arrays.
 * \param attrs   Node attributes of the operator.
 * \param ctx     Operator context (used for the device mask).
 * \param inputs  Input arrays whose storage types are reported.
 * \param req     Write requests; unused here but kept for a uniform signature.
 * \param outputs Output arrays whose storage types are reported.
 * \return Human-readable description (see operator_stype_string).
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Take the NDArray by const reference: by-value capture would copy the
  // handle (and bump its shared refcount) once per element for no benefit.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
/*!
 * \brief Log `message` at INFO level at most once per thread.
 *        Intended for storage fallback warning messages.
 * \param message The exact text to emit; subsequent identical calls
 *        from the same thread are silently dropped.
 */
inline void LogOnce(const std::string& message) {
  // Per-thread registry of messages already emitted. Requires
  // <unordered_set> and <dmlc/thread_local.h>, now included directly by this
  // header instead of relying on transitive includes.
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
  // Env toggle is read once per process (function-local static).
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
  // Deduplicated per thread so repeated fallbacks don't spam the log.
LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// Heuristic to determine the number of worker threads per GPU.
inline int GetNumThreadsPerGPU() {
// This is resource efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// Heuristic to get the number of matching colors;
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
// This is resource efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  // Never exceed the per-GPU worker thread count.
return std::min(num_match_color, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the first `n` elements of array `a` into `start`, parallelized
 *        with an OpenMP reduction when OpenMP is enabled.
 * \return start + a[0] + ... + a[n-1] in the accumulator type V.
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Recursive worker for ParallelSort; do not call directly.
 *        Ranges shorter than `grainsize` are sorted sequentially; larger
 *        ranges are split in half, the left half sorted on a spawned thread,
 *        and the two sorted halves merged in place.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread worker(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  worker.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Sort [first, last) into the order defined by `comp`, recursively
 *        splitting long ranges across up to `num_threads` helper threads.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t num = std::distance(first, last);
  // Never split below 16K elements; below that, threading overhead dominates.
  const size_t min_grain = static_cast<size_t>(1024 * 16);
  const size_t grainsize = std::max(num / num_threads + 5, min_grain);
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Sort [first, last) into ascending order using the default `<`
 *        comparison, delegating to the comparator-taking overload.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Helper for non-array type `T`: the primary template exposes
 *        `SingleObject`, selecting the single-object MakeUnique overload.
 */
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Partial specialization for arrays of unknown bound `T[]`:
 *        exposes `UnknownBound`, selecting the array MakeUnique overload.
 */
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Partial specialization for arrays of known bound `T[kSize]`:
 *        `KnownBound` is void, so the deleted MakeUnique overload is chosen.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  // Perfect-forward the arguments to T's constructor.
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Create a `std``::``unique_ptr` owning a value-initialized array of `T`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Only participates in overload resolution for arrays of unknown bound
* (see helper::UniqueIf). Elements are value-initialized (`{}`).
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using Element = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Element[n]{});
}
/*!
* \brief Deleted overload: constructing an array of known bound (e.g.
*        `MakeUnique<int[5]>()`) is disallowed, matching std::make_unique.
* \param args List of arguments with which an instance of `T` would be
*        constructed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
* \brief Look up the FCompute-style attribute registered for `op` under
*        `name + "<cpu>"` or `name + "<gpu>"`, chosen by the device of `ctx`;
*        returns nullptr when the op has no such registration.
*        NOTE(review): the attribute maps are `static`, so `name` is only read
*        on the first call per FCompType instantiation — callers must always
*        pass the same `name` for a given FCompType.
*/
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
// Neither CPU nor GPU: fail hard; the return only silences compilers.
LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
return nullptr;
}
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*
* Integral `T`: numeric_limits<T>::max(). Floating `T`: 2^digits, the
* largest integer below which all integers are exactly representable.
*/
template <typename T>
constexpr size_t MaxIntegerValue() {
return std::is_integral<T>::value ?
std::numeric_limits<T>::max():
size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// fp16: 11 significand bits (10 stored + 1 implicit) -> 2^11 = 2048.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
return size_t(2) << 10;
}
// NOTE(review): 2 << 14 = 2^15 = 32768, which looks larger than the largest
// exactly-representable integer for bf16's 8 significand bits (2^8 = 256);
// confirm this value is intentional before relying on it.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
return size_t(2) << 14;
}
// Bit-width of a, i.e. floor(log2(a)) + 1; note that a == 0 also returns 1,
// matching the original behavior.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int bits = 1;
  for (a >>= 1; a != 0; a >>= 1) {
    ++bits;
  }
  return bits;
}
// Bit-width of a, i.e. floor(log2(a)) + 1; note that a == 0 also returns 1,
// matching the original behavior.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int bits = 1;
  for (a >>= 1; a != 0; a >>= 1) {
    ++bits;
  }
  return bits;
}
/*!
* \brief Return an NDArray of all zeros.
* \param stype storage type of the result.
* \param shape shape of the result.
* \param ctx device context to allocate on.
* \param dtype element type of the result.
*/
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
const Context &ctx, const int dtype) {
// NDArray with default storage
if (stype == kDefaultStorage) {
NDArray ret(shape, ctx, false, dtype);
ret = 0;  // default storage must be zero-filled explicitly
return ret;
}
// NDArray with non-default storage. Storage allocation is always delayed.
return NDArray(stype, shape, ctx, true, dtype);
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*        Same semantics as InitZeros, but constructs in place at the back of
*        `vec` instead of returning by value.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype,
const mxnet::TShape &shape,
const Context &ctx,
const int dtype,
std::vector<NDArray> *vec) {
// NDArray with default storage
if (stype == kDefaultStorage) {
vec->emplace_back(shape, ctx, false, dtype);
vec->back() = 0;  // default storage must be zero-filled explicitly
} else {
// NDArray with non-default storage. Storage allocation is always delayed.
vec->emplace_back(stype, shape, ctx, true, dtype);
}
}
/*!
* \brief parallelize copy by OpenMP.
*        Below the MXNET_CPU_PARALLEL_SIZE threshold (default 200000
*        elements) a single memcpy is used instead, where thread startup
*        would dominate.
*/
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
// Threshold is read from the environment once per process.
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
// GCC 8+ warns when memcpy targets class types; suppressed here —
// presumably DType is always trivially copyable (TODO confirm).
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
}
}
/*!
* \brief parallelize add by OpenMP (dst[i] += src[i]).
*        Below the MXNET_CPU_PARALLEL_SIZE threshold (default 200000
*        elements) a plain serial loop avoids OpenMP overhead.
*/
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
// Threshold is read from the environment once per process.
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
* Note: a legacy 0-ndim shape always becomes "unknown" here, never
* "scalar" — the legacy encoding cannot distinguish the two.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
*shape = mxnet::TShape();  // unknown shape ndim = -1
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
(*shape)[j] = -1;  // unknown dim size = -1
}
}
}
}
// In-place numpy conversion of every shape in the vector.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  const size_t count = shapes->size();
  for (size_t idx = 0; idx < count; ++idx) {
    ConvertToNumpyShape(&shapes->at(idx));
  }
}
/*!
* \brief Convert a shape produced by the infer-shape functions/pass back to
*        the legacy shape definition (inverse of ConvertToNumpyShape).
*/
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
if (!mxnet::ndim_is_known(*shape)) {
*shape = mxnet::TShape(0, -1);  // legacy: ndim 0 encodes "shape unknown"
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if (!mxnet::dim_size_is_known(*shape, j)) {
(*shape)[j] = 0;  // legacy: dim size 0 encodes "unknown"
}
}
}
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToLegacyShape(&(shapes->at(i)));
}
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
* \brief Return the output name of a NodeEntry by wrapping it in a
*        temporary single-output Symbol and listing its output names.
*/
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
nnvm::Symbol sym;
sym.outputs.push_back(e);
return sym.ListOutputNames()[0];
}
/*!
* \brief Return a copy of `src` with negative axis values wrapped into
*        [0, ndim); aborts via CHECK if any axis falls outside [-ndim, ndim).
*/
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
// convert negative axes to positive values
const int ndim = src.ndim();
mxnet::TShape axes = src;
for (int i = 0; i < ndim; ++i) {
if (axes[i] < 0) {
axes[i] += ndim;
}
// validated after wrapping, so -ndim..-1 are accepted
CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
<< axes[i] << " exceeds the range ["
<< 0 << ", " << ndim << ")";
}
return axes;
}
// True for any floating-point mshadow dtype (fp16/fp32/fp64).
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat16:
    case mshadow::kFloat32:
    case mshadow::kFloat64:
      return true;
    default:
      return false;
  }
}
// True for the integer mshadow dtypes handled by type promotion
// (uint8/int8/int32/int64).
inline bool is_int(const int dtype) {
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kInt8:
    case mshadow::kInt32:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
/*!
* \brief Return the "more precise" of two mshadow dtypes for binary-op type
*        promotion. Precedence: float64 > float32 > float16 among floats;
*        any float beats any int; int64 > int32 > uint8/int8. The mixed
*        uint8/int8 pair has no promotion here and is rejected by CHECK
*        (see np_binary_out_infer_type, which maps that pair to int32).
*/
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2) return type1;
if (is_float(type1) && is_float(type2)) {
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
// float mixed with int: the float side wins
return is_float(type1) ? type1 : type2;
}
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
/*!
* \brief NumPy-style output dtype for a binary op: the mixed uint8/int8 pair
*        (which get_more_precise_type rejects) promotes to int32; everything
*        else defers to get_more_precise_type.
*/
inline int np_binary_out_infer_type(const int type1, const int type2) {
if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
return mshadow::kInt32;
}
return get_more_precise_type(type1, type2);
}
inline const std::string
NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  // A node may carry an explicit "__profiler_scope__" attribute; otherwise
  // fall back to the storage default scope name.
  const std::unordered_map<std::string, std::string>& dict = attrs.dict;
  const auto scope_it = dict.find("__profiler_scope__");
  if (scope_it != dict.end()) {
    return scope_it->second;
  }
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
/*!
* \brief Default dtype: float64 when NumPy default-dtype mode is enabled,
*        float32 otherwise.
*/
inline int GetDefaultDtype() {
return Imperative::Get()->is_np_default_dtype() ?
mshadow::kFloat64 :
mshadow::kFloat32;
}
/*!
* \brief Same as above, but an explicitly requested dtype (anything other
*        than -1) takes precedence over the mode default.
*/
inline int GetDefaultDtype(int dtype) {
if (dtype != -1) return dtype;
return Imperative::Get()->is_np_default_dtype() ?
mshadow::kFloat64 :
mshadow::kFloat32;
}
// Allocate `size` bytes aligned to `alignment` into *ptr; returns true on
// success. Uses _aligned_malloc on MSVC and posix_memalign elsewhere
// (which requires alignment to be a power-of-two multiple of sizeof(void*)).
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#if _MSC_VER
  *ptr = _aligned_malloc(size, alignment);
  return *ptr != nullptr;
#else
  return posix_memalign(ptr, alignment, size) == 0;
#endif
}
/*!
* \brief Release memory obtained from AlignedMemAlloc. MSVC's
*        _aligned_malloc requires _aligned_free; plain free() matches
*        posix_memalign.
*/
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
_aligned_free(ptr);
#else
free(ptr);
#endif
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
GB_binop__first_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int64)
// A*D function (colscale): GB (_AxD__first_int64)
// D*A function (rowscale): GB (_DxB__first_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT64 || GxB_NO_FIRST_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; for the FIRST op, cij = aij.
void GB (_Cdense_ewise3_noaccum__first_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix. The generator
// compiled the kernel body out (#if 0): with FIRST, cij = first(cij,bij)
// = cij, so the accumulation is a no-op and only success is reported
// (or GrB_NO_VALUE when the operator is disabled).
GrB_Info GB (_Cdense_accumB__first_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix. As with accumB above,
// FIRST makes the accumulation a no-op, so the kernel body is compiled out.
GrB_Info GB (_Cdense_accumb__first_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal matrix D; with FIRST, cij = aij.
GrB_Info GB (_AxD__first_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal matrix D.
GrB_Info GB (_DxB__first_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B. The alpha/beta scalars are only
// read when is_eWiseUnion is true; plain eWiseAdd leaves them untouched.
GrB_Info GB (_AaddB__first_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces are released by GB_FREE_WORKSPACE after the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__first_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for FIRST, so only the non-flipped branch compiles here.
GrB_Info GB (_AemultB_02__first_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and both A and B bitmap/full.
GrB_Info GB (_AemultB_04__first_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__first_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Compression schemes used by the PSD format; values match the file's
   compression field. */
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes used by the PSD format; values match the header's mode field
   (values 5 and 6 are not handled here). */
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* Per-channel record: type tag and the channel's data length in bytes. */
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/* Layer mask: its decoded image, placement rectangle, default background
   value and flag bits. */
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/* One PSD layer: channel table, blend-mode key, decoded image, optional
   mask, opacity, placement, name buffer, and the raw additional-info block. */
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the first four bytes match the PSD signature "8BPS".
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Translate an ImageMagick composite operator into the matching 4-byte PSD
  blend-mode key; any operator without a PSD equivalent maps to normal
  blending ("norm").
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
Only sRGB images with an alpha channel are processed. Rows are handled in
parallel; any pixel-cache failure makes the function return MagickFalse.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image, ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if (image->matte == MagickFalse || image->colorspace != sRGBColorspace)
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringNotFalse(option) == MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
/* gamma is the normalized alpha; invert c' = gamma*c + (1-gamma)*white.
Fully transparent and fully opaque pixels need no unblending. */
gamma=QuantumScale*GetPixelAlpha(q);
if (gamma != 0.0 && gamma != 1.0)
{
SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Translate a PSD compression tag into the matching ImageMagick
  CompressionType; anything unrecognized (including Raw) maps to
  NoCompression.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Scale every pixel's alpha by the layer opacity; with revert set, divide it
  back out to undo a previous application. Returns MagickFalse on any
  pixel-cache failure.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
/* fully opaque layer: nothing to apply */
if (opacity == QuantumRange)
return(MagickTrue);
if (image->matte != MagickTrue)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
else if (opacity > 0)
/* inverse of the scaling above; guarded against divide-by-zero */
SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
(MagickRealType) opacity)));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Multiply (or, with revert set, divide) the image's alpha channel by the
  intensity of a layer's opacity mask.  The mask is first composited onto
  a full-size clone of the image filled with `background' so pixels the
  mask does not cover get the mask's background value.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  MagickPixelPacket
    color;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  /* Build an image-sized canvas holding the mask over the background. */
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  GetMagickPixelPacket(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color);
  /* Position the mask using the page offsets relative to the layer. */
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->matte=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else if (intensity > 0)
        /* Inverse operation; zero intensity cannot be reverted. */
        SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash a layer's opacity-mask image in the global image registry so a
  later write can restore it.  The registry key is a short random string
  whose byte at index 8 encodes the mask background; the key is published
  on the layer image through the "psd:opacity-mask" artifact.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request 9+1 bytes so the stores to key[8] (mask background) and
    key[9] (terminator) below stay within the requested key length.
    The previous request of 2+1 bytes made those stores land past the
    end of the random key.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* Convert the mask offsets back to canvas coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Expand PackBits-style RLE compressed channel data.

    number_compact_pixels  bytes available in compact_pixels.
    compact_pixels         the compressed stream.
    depth                  bits per sample; 1, 2 and 4 expand each input
                           byte to several output bytes, anything else
                           copies one byte per sample.
    number_pixels          capacity of the output buffer.
    pixels                 decompressed output, one byte per sample.

  Returns the number of output bytes produced.  Both macros below bail
  out early (returning the current count) when either buffer would be
  overrun, so a truncated or corrupt stream yields a short count rather
  than an out-of-bounds access.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Stop when the input stream is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Stop when the output buffer would overflow. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    /* 128 is a no-op filler byte in PackBits. */
    if (length == 128)
      continue;
    if (length > 128)
      {
        /*
          Run packet: the next source byte is repeated 257-length times.
        */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* Eight 1-bit samples per byte; a set bit maps to 0. */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* Four 2-bit samples per byte. */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* Two 4-bit samples per byte. */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal packet: the next length+1 source bytes are copied through
      (each expanded according to depth, as above).
    */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/*
  Release every image and additional-info blob owned by the layer list,
  then free the list itself.  Always returns NULL.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer;

    layer=layer_info+j;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per sample on disk: two for >8-bit depth (or a palette wider
  than 256 entries), one otherwise.
*/
static inline size_t GetPSDPacketSize(Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if ((image->colors > 256) || (image->depth > 8))
        return(2);
      return(1);
    }
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a PSD length field from the blob: 32 bits in version-1 files,
  64 bits otherwise.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes occupied by one scanline of channel data.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /* 1-bit data packs eight samples per byte, rounded up. */
  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
/*
  Printable name of a PSD color mode, used for debug logging.
*/
static const char *ModeToString(PSDImageType type)
{
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
/*
  Walk the "8BIM" image-resource blocks, capturing the whole section as
  an 8bim profile and acting on the resources this coder understands:
  0x03ed (resolution info, copied into the image geometry) and 0x0421
  (version info, whose 5th data byte clears *has_merged_image when zero).
  Returns the profile, or NULL when the section is shorter than a
  minimal block.  Bounds checks on p guard against walking outside
  blocks[0..length).
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  /* Each block: "8BIM", 16-bit id, Pascal name (padded even), 32-bit
     data length, data (padded even). */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* The name is padded so length byte + text occupy an even count. */
    if (name_length % 2 == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject a data length that would point outside the section. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        /* Skip the fixed-point fraction and unit fields. */
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Reverse `length' bytes in place when the image is little-endian;
  big-endian (MSB) images already have keys in file order.
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for (--q; p < q; ++p, --q)
  {
    char
      swap;

    /* Plain temporary swap: clearer than the XOR trick the old code
       used, and free of its aliasing hazard. */
    swap=(*p);
    *p=(*q);
    *q=swap;
  }
}
/*
  Store one decoded sample into the pixel cache.  `type' is the PSD
  channel id: 0..4 select red/green/blue/black(or alpha)/alpha, -1 is the
  transparency channel and values below -1 are user-supplied mask
  channels (which here feed gray/alpha of a separate mask image).
  For PseudoClass images the sample is a colormap index instead.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,
  PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;

      if (type == 0)
        {
          /* Channel 0 carries the palette index, scaled to its width. */
          if (packet_size == 1)
            SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
          else
            SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(indexes+x));
      /* Any non-index channel on a palette image is treated as alpha. */
      if ((type == 0) && (channels > 1))
        return;
      else
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* Grayscale images (and mask channel -2) replicate the sample
         into all three components. */
      if (channels < 3 || type == -2)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha (if enabled). */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      /* Fifth channel is only meaningful as alpha for CMYK-style data;
         sRGB-compatible images with >3 channels ignore it. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}
/*
  Push one decoded scanline (`pixels') into row `row' of the pixel
  cache, dispatching each sample through SetPSDPixel.  Samples are one
  or two bytes wide depending on GetPSDPacketSize; for 1-bit images each
  byte expands to up to eight pixels.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      {
        /* 16-bit samples are stored big-endian. */
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* 1-bit data: expand up to 8 pixels from this byte, clamped to
           the pixels remaining in the row.  A set bit maps to black. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* The outer loop increments x too; step back one unless the row
           is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read uncompressed (RAW) channel data: one row_size-byte scanline per
  image row, pushed straight into the pixel cache.  Returns MagickFalse
  on a short read or a pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Note: the dead `status=MagickFalse' store that used to sit here
       was removed; status is always assigned below before it is read. */
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        /* Truncated file: fail and stop reading further rows. */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
/*
  Read the per-scanline byte counts that precede RLE channel data:
  16-bit counts in version-1 files, 32-bit otherwise.  Returns NULL
  when the table cannot be allocated.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version == 1)
      {
        sizes[row]=(MagickOffsetType) ReadBlobShort(image);
        continue;
      }
    sizes[row]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE-compressed channel: each scanline is sizes[y] bytes of
  PackBits data, decompressed with DecodePSDPixels and pushed into the
  pixel cache.  The compact buffer is sized to the largest scanline,
  with a sanity cap of row_size+2048 to reject absurd length tables.
  Returns MagickFalse on short reads, bad decode counts or pixel-cache
  failures.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed buffer to the largest scanline. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* Was sizeof(*pixels); use the matching object for clarity. */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Assume failure for this row until every step below succeeds. */
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a zlib-compressed channel: inflate compact_size bytes from the
  blob into a rows*row_size pixel buffer, optionally undo the per-row
  delta prediction (ZipWithPrediction), then push each row into the
  pixel cache.  Returns MagickFalse on read, inflate or pixel-cache
  failure.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* A claimed compressed size larger than the file is corrupt. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /* Inflate the whole channel in one pass. */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta prediction: each sample is stored as
        the difference from its left neighbor (byte pairs for 16-bit
        samples), so integrate across every row.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one layer channel from the blob and decode it (RAW, RLE or ZIP)
  into either the layer image or, for user-supplied mask channels
  (type < -1), a freshly cloned mask image which is handed over to
  layer_info->mask.image on success.  Regardless of the decode outcome
  the blob is repositioned to the end of the channel's data so the next
  channel can be read.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte compression
             tag already consumed by the caller). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          mask->matte=MagickFalse;
          /* Decode this channel into the mask instead of the layer. */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        {
          /* Free the cloned mask before throwing, else it leaks. */
          if (mask != (Image *) NULL)
            mask=DestroyImage(mask);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always leave the blob at the end of this channel's data. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* Transfer ownership of the decoded mask to the layer. */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Populate one layer image: set its composite operator from the blend
  key, its colorspace from the document mode, read every channel, then
  apply the layer opacity and (unless disabled) the opacity mask.
  Layer geometry/opacity are also exposed as psd:layer.* artifacts.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      /* Hidden layers keep their data but do not composite. */
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
           (psd_info->mode == GrayscaleMode))
    (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression tag. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* PSD stores CMYK inverted relative to MagickCore's convention. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if (status != MagickFalse && layer_info->mask.image != (Image *) NULL)
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Verify a layer supplies every color channel the document mode needs:
  red, plus green/blue when min_channels >= 3, plus black when
  min_channels >= 4.  Returns MagickTrue when all required channels are
  present, or when only a lone alpha channel remains unaccounted for
  and the layer has at least one channel to spare.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    j;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Flag every required channel, then clear flags as channels appear. */
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    short
      type;

    type=layer_info->channel_info[j].type;
    if (type < -1)
      continue;
    if (type == -1)
      {
        required|=AlphaChannel;
        continue;
      }
    switch (type)
    {
      case 0: required&=~RedChannel; break;
      case 1: required&=~GreenChannel; break;
      case 2: required&=~BlueChannel; break;
      case 3: required&=~BlackChannel; break;
      default: break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,(size_t) count);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].page.x=(ssize_t) ReadBlobSignedLong(image);
y=(ssize_t) ReadBlobSignedLong(image);
x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Public entry point for reading the PSD layer section: enforce the
    coder security policy, then delegate to the internal reader.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}
/*
  Read the flattened (merged) composite image that follows the layer and
  mask section of a PSD file into `image`.  Only Raw and RLE compression
  are supported here; any other compression raises a TypeWarning and
  returns MagickFalse.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
register ssize_t
i;
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
/* RLE data is preceded by one packed-row byte count per row per channel. */
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
/* A two-channel file is gray+alpha: channel 1 carries the alpha (-1) plane. */
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
/* PSD stores CMYK data inverted; undo that on success. */
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateImage(image,MagickFalse);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
/* RelinquishMagickMemory() accepts NULL, so this is safe for Raw too. */
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
/*
  Read a PSD/PSB image: header, colormap, image resource blocks, layer and
  mask section, and finally the precombined (merged) composite.  Returns
  the first image in the resulting list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
imageListLength;
ssize_t
count;
StringInfo
*profile;
unsigned char
*data;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.  Version 1 is PSD, version 2 is PSB (large document).
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
/* PSD (version 1) caps dimensions at 30000; only PSB may be larger. */
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/* min_channels is used below to reject files with too few channels. */
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace);
/* A fifth channel, when present, is alpha. */
image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse;
}
else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
status=AcquireImageColormap(image,(size_t) (psd_info.depth != 16 ? 256 :
65536));
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  Image colormap allocated");
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace);
image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse;
}
else
image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  reading colormap");
if (psd_info.mode == DuotoneMode)
{
/*
Duotone image data; the format of this data is undocumented.
Read and discard it.
*/
data=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,(size_t) length,data);
data=(unsigned char *) RelinquishMagickMemory(data);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap: all reds, then all greens, then all blues.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->matte=MagickFalse;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block: a sequence of 8BIM records (may also tell us
whether a merged composite is present).
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
&has_merged_image);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
/* An 8-byte section holds only a nested length field; read through it. */
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"  reading the precombined layer");
imageListLength=GetImageListLength(image);
if (has_merged_image != MagickFalse || imageListLength == 1)
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
/* No usable composite and no layers read yet: retry the layer section. */
if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
if (imageListLength == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
/* Synthesize the composite by flattening the layers we did read. */
image->background_color.opacity=TransparentOpacity;
(void) SetImageBackgroundColor(image);
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
// (void) SetImageProfile(image,GetStringInfoName(profile),profile);
// duplicate the profile to all layers
Image *onelayer = image;
while (onelayer && onelayer->next != image) {
(void) SetImageProfile(onelayer,GetStringInfoName(profile),profile);
onelayer = onelayer->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  /*
    Register the PSB (large document) and PSD coders; both share the same
    read/write/identify handlers and require a seekable stream.
  */
  static const struct
  {
    const char
      *tag,
      *description;
  } formats[] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  MagickInfo
    *entry;

  register size_t
    i;

  for (i=0; i < sizeof(formats)/sizeof(formats[0]); i++)
  {
    entry=SetMagickInfo(formats[i].tag);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->seekable_stream=MagickTrue;
    entry->description=ConstantString(formats[i].description);
    entry->module=ConstantString("PSD");
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Emit a PSD offset field: 16 bits wide in version-1 (PSD) files,
    32 bits wide in version-2 (PSB) files.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Seek back to `offset`, patch the previously reserved offset field
    (16-bit for version 1, 32-bit otherwise) with `size`, then restore
    the original blob position.
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Emit a PSD size field: 32 bits wide in version-1 (PSD) files,
    64 bits wide in version-2 (PSB) files.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Seek back to `offset`, patch the previously reserved size field
    (32-bit for version 1, 64-bit otherwise) with `size`, then restore
    the original blob position.
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Compress `length` bytes from `pixels` into `compact_pixels` using the
  Packbits (RLE) scheme used by PSD: a control byte 0..127 introduces a
  literal run of (control+1) bytes; a control byte 129..255 repeats the
  next byte (257-control) times; 128 marks end-of-data.  Returns the
  number of bytes written to `compact_pixels` (caller sizes the buffer
  for worst-case expansion via AcquireCompactPixels()).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
/* Scratch buffer for assembling one literal run (max 127 bytes + header). */
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
/* i counts the remaining input bytes; the tail cases (1..3) are special. */
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
/* Single trailing byte: literal run of one. */
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
/* Two trailing bytes: literal run of two. */
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
/* Three identical trailing bytes compress to a repeat run of three. */
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run: extend the repeat as far as possible (cap 127).
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run: collect bytes until a 3-byte repeat starts (cap 127).
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128;  /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    channel,
    count,
    y;

  /*
    Emit the 2-byte compression tag for the channel data that follows.
    For RLE, also reserve one per-row size slot per channel; those slots
    are zero here and patched later via WritePSDOffset().
  */
  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          count+=SetPSDOffset(psd_info,image,0);
      return((size_t) count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return((size_t) WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobMSBShort(image,Raw));
}
/*
  Write one channel of `next_image` to `image`'s blob, row by row, using
  the image's compression (Raw, RLE, or Zip when zlib is available).
  `quantum_type` selects which component is exported.  When `separate`
  is true the channel carries its own compression header and the byte
  count is patched back at `size_offset`.  Returns the number of bytes
  written, or 0 on allocation/encoder failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const PixelPacket
*p;
register ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
/* Per-channel header: size field lives 2 bytes past the current offset. */
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
/* PSD supports only 1/8/16-bit channel data; clamp deeper images to 16. */
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
? MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
/* quality 1..9 maps directly to the zlib compression level. */
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,&image->exception);
/* PSD stores 1-bit data inverted (0 = white); flip the exported bits. */
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
count+=WriteBlob(image,length,compact_pixels);
/* Patch this row's packed byte count into the reserved slot. */
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
/* Finish the deflate stream on the last row. */
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(Image *image)
{
  /*
    Allocate a Packbits work buffer large enough for the worst-case RLE
    expansion of one pixel row: (9*columns)+1 packets of `packet_size`
    bytes (2 for depth > 8, else 1).  Returns NULL with an exception set
    on failure; callers must check the result.
  */
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  /*
    Guard against size_t overflow in (9*columns)+1 before it reaches
    AcquireQuantumMemory(); a wrapped count would under-allocate.
  */
  if (image->columns > ((~(size_t) 0)-1)/9)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
      return((unsigned char *) NULL);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
    }
  return(compact_pixels);
}
/*
  Write every channel of `next_image` (index, gray, RGB(A), or CMYK(A))
  via WritePSDChannel().  With `separate` false (merged composite), a
  single compression header precedes all channels and `rows_offset`
  tracks the per-row RLE size slots; with `separate` true (layer data),
  each channel carries its own header and its byte count is patched at
  `size_offset`.  Returns the total number of bytes written, or 0 on
  allocation failure.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
length,
offset_length;
ssize_t
count;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
if (next_image->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
/* Merged composite: count the channels actually present. */
if (next_image->storage_class != PseudoClass)
{
if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ?
4 : 3);
if (next_image->matte != MagickFalse)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
channels);
/* Size of one channel's block of reserved per-row RLE size slots. */
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if (next_image->storage_class == PseudoClass)
{
/* Colormapped image: a single index channel. */
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
/* PSD stores CMYK inverted; negate before writing (restored below). */
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->matte != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
/* Undo the CMYK negation performed above. */
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
if (separate != MagickFalse)
{
const char
*property;
/* A layer may reference an opacity mask image via the registry. */
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
if (mask != (Image *) NULL)
{
if (mask->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  /*
    Write `value` as a Pascal string (length byte plus at most 255
    characters), then zero-pad the total written size (length byte
    included) up to a multiple of `padding`.  Returns the bytes written.
  */
  register ssize_t
    i;

  size_t
    count,
    length;

  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  count=0;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) != 0)
    for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  /*
    Emit the 8BIM resource 0x03ED (resolution info): horizontal and
    vertical resolution as 16.16 fixed-point pixels-per-inch, each
    followed by display-unit codes (1 = inch, 2 = cm).
  */
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units != PixelsPerCentimeterResolution)
    {
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
      units=1;
    }
  else
    {
      /* Convert pixels/cm to pixels/inch before fixed-point encoding. */
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
      units=2;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  /*
    Write a channel-info record: the signed channel id followed by a
    zero size placeholder that is patched once the data is written.
  */
  size_t
    count;

  count=(size_t) WriteBlobMSBSignedShort(image,channel);
  count+=(size_t) SetPSDSize(psd_info,image,0);
  return(count);
}
/*
  Scan an 8BIM image-resource profile and remove the ICC profile record
  (resource id 0x040F) in place, shrinking the StringInfo accordingly.
  Records are: "8BIM" signature, 16-bit id, Pascal name (read here as
  two sans fields), 32-bit data length, then the data padded to even.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* Too short to hold even one complete resource header. */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;  /* start of this record, for the memmove below */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
/* Full record size: even-padded data plus the 12-byte header. */
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
/* Slide the remaining records over the ICC record. */
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
/* Resource data is padded to an even byte count. */
if ((count & 0x01) != 0)
p++;
}
}
/*
  Scan an 8BIM image-resource profile and remove the resolution record
  (resource id 0x03ED) in place, shrinking the StringInfo accordingly;
  the writer emits a fresh resolution block instead.  Record layout is
  the same as in RemoveICCProfileFromResourceBlock().
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* Too short to hold even one complete resource header. */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;  /* start of this record, for the memmove below */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
/* Even-padded data size; negative indicates a corrupt count. */
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
/* Slide the remaining records over the resolution record. */
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
/* Resource data is padded to an even byte count. */
if ((count & 0x01) != 0)
p++;
}
}
/*
  Decide what to do with the "psd:additional-info" profile when writing:
  with option "all" keep it verbatim; with any option other than
  "selective" drop it entirely; with "selective" filter it in place so
  only records whose 4-char keys appear in the whitelist survive.
  Returns the (possibly filtered) profile to write, or NULL.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
/* Not keeping any additional info: detach and destroy the profile. */
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;  /* becomes the total size of the records we keep */
/* Each record: 4-byte signature, 4-char key, 4-byte big-endian size, data. */
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
/* A record claiming more data than remains means corruption: bail out. */
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
/* Disallowed key: compact the buffer over this record. */
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
/* Truncate to the kept records and re-attach the filtered profile. */
SetStringInfoLength(profile,(const size_t) length);
(void) SetImageProfile(image,"psd:additional-info",info);
return(profile);
}
/*
  Write the image (and any additional images in the list as layers) to a
  PSD/PSB file: file header, mode data/colormap, image resource blocks,
  layer records, per-layer channel data, and finally the merged composite.
  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image)
{
char
layer_name[MaxTextExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
/* Bytes per pixel packet: 2 bytes/sample above 8-bit depth, +alpha sample. */
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->matte != MagickFalse)
packet_size+=image->depth > 8 ? 2 : 1;
/* Version 2 selects PSB (large document) when requested explicitly or
   when either dimension exceeds the PSD limit of 30000. */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,&image->exception) != MagickFalse))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorMatteType) && (image->storage_class == PseudoClass))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->colorspace != CMYKColorspace)
num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
else
num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsGrayImage(image,&image->exception) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsMonochromeImage(image,&image->exception) &&
(image->depth == 1) ? MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
/* Prefer sRGB unless the caller explicitly asked for CMYK. */
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
/* Color mode data section: empty except for indexed images, which carry
   a fixed 768-byte (256 x RGB) palette, zero-padded past image->colors. */
if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Work on a clone: embedded ICC/resolution entries are replaced by the
   ones this writer emits itself. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile as 8BIM resource 0x040F, padded to even length. */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
/* Layers start at the second image in the list, or the image itself when
   it is the only one. */
base_image=GetNextImageInList(image);
if (base_image == (Image *)NULL)
base_image=image;
size=0;
/* Reserve the layer/mask-info and layer-info length fields; size_offset
   marks where to seek back and patch them once sizes are known. */
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
(void) SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/* A negative layer count signals that the first alpha channel holds the
   merged-image transparency. */
if (image->matte != MagickFalse)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
/* First pass: write one layer record per image (geometry, channel table,
   blend mode, opacity, mask, name, additional info).  Channel byte counts
   are patched later via layer_size_offsets. */
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
/* Layer bounding box: top, left, bottom, right. */
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
4 : 3);
total_channels=channels;
if (next_image->matte != MagickFalse)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
/* Remember where this layer's channel-size entries start; channel ids
   -1 = alpha, -2 = user-supplied layer mask. */
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
if (next_image->matte != MagickFalse)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image->compose));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
&image->exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image);
property=(const char *) GetImageProperty(next_image,"label");
if (property == (const char *) NULL)
{
/* Unlabeled layers get synthetic names L1, L2, ... */
(void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/* Extra-data length: Pascal name padded to 4 bytes, optional additional
   info, 8 bytes of fixed fields, 20 bytes of mask data if present. */
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,&image->exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(unsigned char) (
mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
/* The layer info section must be padded to an even byte count. */
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* The merged composite may not be zip-compressed; fall back to RLE. */
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,
MagickFalse) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
kmeans_openmp.c | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#define MICRO_IN_SEC 1000000.00
#define sqr(x) ((x)*(x))
#define MAX_ITERATIONS 100
#define DOUBLE_INFINITY (INFINITY)
#define CLUSTER_NUM 1000
void kmeans(
int dim, // dimension of data
double *ipmatrix, // pointer to data
int numele, // number of elements
int cluster_num, // number of clusters
double *centmatrix_final, // output cluster centroids
int *cluster_assignment_final // output
);
//__declspec(target(mic)) double begin_time, end_time;
double begin_time, end_time;
//__declspec(target(mic))
double microtime(){
int tv_sec,tv_usec;
double time;
struct timeval tv;
struct timezone tz;
gettimeofday(&tv,&tz);
return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
//calculate the distance between points p1 and p2
//__declspec(target(mic))
/* Squared Euclidean distance between the dim-dimensional points p1 and p2.
   The square root is deliberately omitted: it is monotone, so
   nearest-centroid comparisons are unaffected and a sqrt per pair is saved.
   Fixes: declared static — a plain C99/C11 'inline' function has no
   guaranteed out-of-line definition and can fail to link when the compiler
   declines to inline a call; the difference is also computed once instead
   of twice via the multi-evaluating sqr() macro. */
static inline double calc_distance(int dim, double *p1, double *p2)
{
	double distance_sq_sum = 0;
	int ii;

	for (ii = 0; ii < dim; ii++) {
		double d = p1[ii] - p2[ii];
		distance_sq_sum += d * d;
	}
	return distance_sq_sum;
}
/* Print an error message and terminate the process with a failure status.
   Fixes: parameter is now const-correct (callers pass string literals);
   the message goes to stderr instead of stdout; exit(-1) — which wraps to
   an implementation-defined status — replaced by EXIT_FAILURE; declared
   static because a plain C99/C11 'inline' function with external linkage
   can fail to link when the compiler does not inline the call. */
static void fail(const char *str) {
	fprintf(stderr, "%s\n", str);
	exit(EXIT_FAILURE);
}
//print the final result
/* Report the final clustering: tally how many points landed in each of the
   k clusters, then print each cluster's member count and the first two
   coordinates of its centroid.  (X is accepted for signature parity with
   the rest of the driver but is not read here.) */
void cluster_diag(int dim, int n, int k, double *X, int *cluster_assignment_index, double *cluster_centroid)
{
	int c;
	int cluster_member_count[CLUSTER_NUM];

	for (c = k; c-- > 0; )
		cluster_member_count[c] = 0;
	for (c = 0; c < n; ++c)
		++cluster_member_count[cluster_assignment_index[c]];

	printf(" Final clusters \n");
	for (c = 0; c < k; ++c)
		printf(" cluster %d: members: %8d, centroid (%.1f %.1f) \n", c, cluster_member_count[c], cluster_centroid[c*dim + 0], cluster_centroid[c*dim + 1]);
}
/* Copy the iteration buffers into the caller-provided output arrays:
   'in' ints from isrc to itgt, then 'dn' doubles from dsrc to dtgt. */
void copy_result(int in, int * const isrc, int * const itgt, int dn, double * const dsrc, double * const dtgt)
{
	int idx = 0;

	while (idx < in) {
		itgt[idx] = isrc[idx];
		++idx;
	}
	idx = 0;
	while (idx < dn) {
		dtgt[idx] = dsrc[idx];
		++idx;
	}
}
/* Driver: read a "# num rows=R num columns=C" header plus R*C doubles from
   the file named in argv[1], run k-means with CLUSTER_NUM clusters, and
   print the resulting clusters.
   Fixes: argv[1] was dereferenced without checking argc; the per-value
   fscanf results were ignored (a short file left ipmatrix uninitialized);
   the two result allocations were never checked; the input file was never
   closed; unused variable k removed. */
int main(int argc, char** argv)
{
	int numele = 0;  /* number of rows */
	int dim = 0;     /* number of columns */
	int i, j;
	FILE *ipfile;    /* input file */

	begin_time = microtime();
	if (argc < 2)
	{
		printf("Usage: %s <input file>\n", argv[0]);
		exit(1);
	}
	if ((ipfile = fopen(argv[1],"r"))==NULL)
	{
		printf("Error: can't open input file %s\n",argv[1]);
		exit(1);
	}
	if(fscanf(ipfile,"# num rows=%d num columns=%d",&numele,&dim)!=2)
	{
		printf("Format error in first line\n");
		exit(1);
	} else {
		printf("num rows=%d num columns=%d\n",numele,dim);
	}
	/* ipmatrix is the flattened numele x dim input matrix. */
	double * ipmatrix=(double *)malloc(numele*dim*sizeof(double));
	if (ipmatrix==NULL) {
		printf(" malloc failed\n");
		exit(1);
	}
	for (i=0; i<numele; i++)
	{
		for (j=0; j<dim; j++)
		{
			if (fscanf(ipfile,"%lf",&(ipmatrix[i*dim+j])) != 1)
			{
				printf("Format error in data\n");
				exit(1);
			}
		}
	}
	fclose(ipfile);
	/* Per-row cluster assignment and final centroid matrix. */
	int * cluster_assignment_final = (int *) malloc(sizeof(int)*numele);
	double * centmatrix_final = (double *) malloc (CLUSTER_NUM*dim*sizeof(double));
	if (cluster_assignment_final == NULL || centmatrix_final == NULL) {
		printf(" malloc failed\n");
		exit(1);
	}
	kmeans(dim, ipmatrix, numele, CLUSTER_NUM, centmatrix_final, cluster_assignment_final); //call kmeans
	printf("successful\n");
	cluster_diag(dim, numele, CLUSTER_NUM, ipmatrix, cluster_assignment_final, centmatrix_final);
	free(ipmatrix);
	free(cluster_assignment_final);
	free(centmatrix_final);
	return 0;
}
/*
  One k-means iteration: assign every point to its nearest centroid from
  centmatrix_pre (the parallel phase), then recompute centmatrix_cur as the
  mean of each cluster's members (serial).  *tot_D receives the summed
  squared distance of all points to their chosen centroids.  Returns the
  number of points whose cluster assignment changed relative to
  cluster_assignment_pre (0 means convergence).
  NOTE(review): the cluster_count malloc result is used unchecked — a
  failed allocation would crash in the init loop below; consider failing
  loudly like the allocations in kmeans().
*/
int kmeans_itera_operator(
int dim,
double *ipmatrix,
int numele,
int cluster_num,
double *centmatrix_cur,
double *centmatrix_pre,
int *cluster_assignment_cur,
int *cluster_assignment_pre,
double *tot_D
) {
int *cluster_count = (int *)malloc(sizeof(int) * cluster_num);
int change_count = 0;
int i,j,cluster_assignment;
cluster_assignment=0;
double min_D,dist;
// init
/* Zero the accumulators: per-cluster member counts and the centroid sums. */
double tot_D_tmp = 0;
for(i=0; i < cluster_num; i++) {
cluster_count[i] = 0;
for(j=0; j < dim; j++) {
centmatrix_cur[i*dim+j] = 0;
}
}
/* begin_time/end_time are file-scope timers; each phase is timed. */
end_time = microtime();
printf("serial part cost time: %fs\n",end_time-begin_time);
begin_time = end_time;
//calculate all distance and choose the cluster, at the same time calculate total distance and new centroids.
/*
#pragma offload target(mic) \
in(ipmatrix:length(numele*dim) alloc_if(1) free_if(1)) \
in(centmatrix_pre:length(cluster_num*dim) alloc_if(1) free_if(1)) \
in(cluster_assignment_pre:length(numele) alloc_if(1) free_if(1)) \
out(cluster_assignment_cur:length(numele) alloc_if(1) free_if(1))
*/
/* Each thread handles a slice of points; min_D/cluster_assignment/dist are
   per-thread scratch, and the distance total and change count are combined
   with OpenMP reductions. */
#pragma omp parallel for private(min_D, cluster_assignment, dist) reduction(+:tot_D_tmp, change_count)
for(i=0; i < numele; i++) {
min_D = DOUBLE_INFINITY;
cluster_assignment = -1;
//calculate the distance and choose the cluster
for(j=0; j < cluster_num; j++) {
dist = calc_distance(dim, ipmatrix+i*dim, centmatrix_pre+j*dim);
if(dist < min_D) {
min_D = dist;
cluster_assignment = j;
}
}
if(cluster_assignment != cluster_assignment_pre[i]) {
change_count++ ;
}
//calculate the total distance and new centroids
cluster_assignment_cur[i] = cluster_assignment;
tot_D_tmp += min_D;
}
end_time = microtime();
printf("parallel part cost time: %fs\n",end_time-begin_time);
begin_time = end_time;
/* Serial accumulation of each cluster's coordinate sums and member count. */
for(i=0; i< numele; i++) {
cluster_count[ cluster_assignment_cur[i] ]++;
for(j=0; j < dim; j++) {
centmatrix_cur[ cluster_assignment_cur[i] * dim + j] += ipmatrix[i*dim + j];
}
}
// for(i=0;i<20; i++)
// printf("cluster%d count:%d\n",i,cluster_count[i]);
*tot_D = tot_D_tmp;
/* Turn sums into means; empty clusters keep their (zeroed) centroid. */
for(i=0; i < cluster_num; i++) {
if( cluster_count[i] < 1 )
continue;
for(j=0; j < dim; j++) {
centmatrix_cur[i*dim + j] = centmatrix_cur[i*dim + j] / cluster_count[i];
}
}
end_time = microtime();
printf("serail part cost time: %fs\n",end_time-begin_time);
free(cluster_count);
return change_count;
}
/* Run k-means: seed the centroids with the first cluster_num rows of
   ipmatrix, then iterate up to MAX_ITERATIONS times, double-buffering the
   assignment and centroid arrays.  Stops early when no point changes
   cluster or when the total distance increases (in which case the previous
   state is restored).  Results are copied into centmatrix_final and
   cluster_assignment_final.
   Fixes: removed a stray debug exit(0) after the first iteration, which
   terminated the whole program before the refinement loop ever ran (and
   leaked every allocation); removed the unused loop variable j. */
void kmeans(
int dim, // dimension of data
double *ipmatrix, // pointer to data
int numele, // number of elements
int cluster_num, // number of clusters
double *centmatrix_final, // output cluster centroids
int *cluster_assignment_final // output
)
{
	/* Double buffers: *_cur is written by each iteration, *_pre holds the
	   previous state; the pointers are swapped between iterations. */
	int *cluster_assignment_cur = (int *)malloc(sizeof(int) * numele);
	int *cluster_assignment_pre = (int *)malloc(sizeof(int) * numele);
	double *centmatrix_cur = (double *)malloc(sizeof(double) * cluster_num * dim);
	double *centmatrix_pre = (double *)malloc(sizeof(double) * cluster_num * dim);
	int i, *cluster_assignment_tmp;
	double totD, prev_totD, *centmatrix_tmp;

	if (!cluster_assignment_cur || !cluster_assignment_pre || !centmatrix_pre || !centmatrix_cur)
		fail("Error allocating memory!");

	// initial setup
	//choose the first CLUSTER_NUM of ipmatrix as the centmatrix
	for( i=0; i< cluster_num * dim ; i++)
	{
		centmatrix_pre[i] = ipmatrix[i];
	}
	kmeans_itera_operator(dim, ipmatrix, numele, cluster_num, centmatrix_cur, centmatrix_pre, cluster_assignment_cur, cluster_assignment_pre, &totD);
	prev_totD = DOUBLE_INFINITY;

	// Iteration calculate
	int batch_iteration = MAX_ITERATIONS;
	while ( batch_iteration-- )
	{
		printf(" batch_iteration = %d\n", batch_iteration);
		prev_totD = totD;
		/* Promote the current state to "previous" by swapping buffers. */
		cluster_assignment_tmp = cluster_assignment_cur;
		cluster_assignment_cur = cluster_assignment_pre;
		cluster_assignment_pre = cluster_assignment_tmp;
		centmatrix_tmp = centmatrix_cur;
		centmatrix_cur = centmatrix_pre;
		centmatrix_pre = centmatrix_tmp;
		/* Converged: no point changed cluster in this iteration. */
		if(kmeans_itera_operator(dim, ipmatrix, numele, cluster_num, centmatrix_cur, centmatrix_pre, cluster_assignment_cur, cluster_assignment_pre, &totD) == 0) {
			break;
		}
		/* Total distance got worse: roll back to the previous state. */
		if( prev_totD < totD ) {
			printf("%lf %lf\n",prev_totD,totD);
			cluster_assignment_tmp = cluster_assignment_cur;
			cluster_assignment_cur = cluster_assignment_pre;
			cluster_assignment_pre = cluster_assignment_tmp;
			centmatrix_tmp = centmatrix_cur;
			centmatrix_cur = centmatrix_pre;
			centmatrix_pre = centmatrix_tmp;
			break;
		}
	}
	copy_result(numele, cluster_assignment_cur, cluster_assignment_final, cluster_num * dim, centmatrix_cur, centmatrix_final);
	free(cluster_assignment_cur);
	free(cluster_assignment_pre);
	free(centmatrix_cur);
	free(centmatrix_pre);
}
|
GridInit.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// Generates randomized energy grid for each nuclide
// Note that this is done as part of initialization (serial), so
// rand() is used.
/* Fill every grid point of every nuclide with uniform pseudo-random values
   in [0,1].  This runs serially during initialization, so plain rand() is
   acceptable here. */
void generate_grids( NuclideGridPoint ** nuclide_grids,
                     long n_isotopes, long n_gridpoints ) {
	long iso, pt;
	for( iso = 0; iso < n_isotopes; iso++ )
		for( pt = 0; pt < n_gridpoints; pt++ )
		{
			NuclideGridPoint * point = &nuclide_grids[iso][pt];
			point->energy         = (double) rand() / (double) RAND_MAX;
			point->total_xs       = (double) rand() / (double) RAND_MAX;
			point->elastic_xs     = (double) rand() / (double) RAND_MAX;
			point->absorbtion_xs  = (double) rand() / (double) RAND_MAX;
			point->fission_xs     = (double) rand() / (double) RAND_MAX;
			point->nu_fission_xs  = (double) rand() / (double) RAND_MAX;
		}
}
// Verification version of this function (tighter control over RNG)
/* Verification variant of generate_grids: identical fill order, but values
   come from the reproducible rn_v() generator so results can be checked. */
void generate_grids_v( NuclideGridPoint ** nuclide_grids,
                       long n_isotopes, long n_gridpoints ) {
	long iso, pt;
	for( iso = 0; iso < n_isotopes; iso++ )
		for( pt = 0; pt < n_gridpoints; pt++ )
		{
			NuclideGridPoint * point = &nuclide_grids[iso][pt];
			point->energy         = rn_v();
			point->total_xs       = rn_v();
			point->elastic_xs     = rn_v();
			point->absorbtion_xs  = rn_v();
			point->fission_xs     = rn_v();
			point->nu_fission_xs  = rn_v();
		}
}
// Sorts the nuclide grids by energy (lowest -> highest)
/* Sort each nuclide's grid in place by ascending energy, using the shared
   NGP_compare ordering. */
void sort_nuclide_grids( NuclideGridPoint ** nuclide_grids, long n_isotopes,
                         long n_gridpoints )
{
	long iso;
	for( iso = 0; iso < n_isotopes; iso++ )
		qsort( nuclide_grids[iso], n_gridpoints, sizeof(NuclideGridPoint),
		       NGP_compare );
}
// Allocates unionized energy grid, and assigns union of energy levels
// from nuclide grids to it.
GridPoint * generate_energy_grid( long n_isotopes, long n_gridpoints,
                                  NuclideGridPoint ** nuclide_grids) {
	/* Build the unionized grid: the sorted union of every nuclide's energy
	   levels, plus one per-nuclide index array per unionized point.
	   Fix: the energy_grid malloc result was used unchecked (only the later
	   'full' allocation was verified); fail loudly in the same style. */
	int mype = 0;
	#ifdef MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif
	if( mype == 0 ) printf("Generating Unionized Energy Grid...\n");
	long n_unionized_grid_points = n_isotopes*n_gridpoints;
	int (*cmp) (const void *, const void *);
	cmp = NGP_compare;
	GridPoint * energy_grid = (GridPoint *)malloc( n_unionized_grid_points
	                          * sizeof( GridPoint ) );
	if( energy_grid == NULL )
	{
		fprintf(stderr,"ERROR - Out Of Memory!\n");
		exit(1);
	}
	if( mype == 0 ) printf("Copying and Sorting all nuclide grids...\n");
	/* Sort a private flat copy of all nuclide grid points; the originals
	   stay sorted per-nuclide. */
	NuclideGridPoint ** n_grid_sorted = gpmatrix( n_isotopes, n_gridpoints );
	memcpy( n_grid_sorted[0], nuclide_grids[0], n_isotopes*n_gridpoints*
	        sizeof( NuclideGridPoint ) );
	qsort( &n_grid_sorted[0][0], n_unionized_grid_points,
	       sizeof(NuclideGridPoint), cmp);
	if( mype == 0 ) printf("Assigning energies to unionized grid...\n");
	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].energy = n_grid_sorted[0][i].energy;
	gpmatrix_free(n_grid_sorted);
	/* One flat backing array provides n_isotopes index slots per unionized
	   grid point; xs_ptrs of point i aliases its slice of 'full'. */
	int * full = (int *) malloc( n_isotopes * n_unionized_grid_points
	             * sizeof(int) );
	if( full == NULL )
	{
		fprintf(stderr,"ERROR - Out Of Memory!\n");
		exit(1);
	}
	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].xs_ptrs = &full[n_isotopes * i];
	return energy_grid;
}
// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches
// required is: binary searches = n_gridpoints * n_isotopes^2
// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches
// required is: binary searches = n_gridpoints * n_isotopes^2
void set_grid_ptrs( GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids,
long n_isotopes, long n_gridpoints )
{
int mype = 0;
#ifdef MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
if( mype == 0 ) printf("Assigning pointers to Unionized Energy Grid...\n");
/* Each unionized grid point is independent, so the outer loop is
   parallelized; all arrays are only read except each point's own xs_ptrs. */
#ifdef OPENMP
#pragma omp parallel for default(none) \
shared( energy_grid, nuclide_grids, n_isotopes, n_gridpoints, mype )
#endif
for( long i = 0; i < n_isotopes * n_gridpoints ; i++ )
{
int nthreads = 1, tid = 0;
double quarry = energy_grid[i].energy;
#ifdef OPENMP
nthreads = omp_get_num_threads();
tid = omp_get_thread_num();
#endif
/* Progress report from thread 0 only; the nthreads divisor approximates
   this thread's share of the total work (assumes a static schedule). */
if( INFO && mype == 0 && tid == 0 && i % 200 == 0 )
printf("\rAligning Unionized Grid...(%.0lf%% complete)",
100.0 * (double) i / (n_isotopes*n_gridpoints /
nthreads) );
/* For this unionized energy, record the nearest index into every
   nuclide's own (sorted) grid. */
for( long j = 0; j < n_isotopes; j++ )
{
// j is the nuclide i.d.
// log n binary search
energy_grid[i].xs_ptrs[j] =
binary_search( nuclide_grids[j], quarry, n_gridpoints);
}
}
if( mype == 0 ) printf("\n");
//test
/*
for( int i=0; i < n_isotopes * n_gridpoints; i++ )
for( int j = 0; j < n_isotopes; j++ )
printf("E = %.4lf\tNuclide %d->%p->%.4lf\n",
energy_grid[i].energy,
j,
energy_grid[i].xs_ptrs[j],
(energy_grid[i].xs_ptrs[j])->energy
);
*/
}
|
pfmg_setup.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
#define DEBUG 0
/* Coarse-grid index offset for coarsening direction cdir: all zeros
   (coarse points are taken starting at the origin in every direction). */
#define hypre_PFMGSetCIndex(cdir, cindex) \
{ \
hypre_SetIndex3(cindex, 0, 0, 0); \
hypre_IndexD(cindex, cdir) = 0; \
}
/* Fine-grid index offset: 1 in the coarsening direction, 0 elsewhere
   (the fine points skipped by semicoarsening). */
#define hypre_PFMGSetFIndex(cdir, findex) \
{ \
hypre_SetIndex3(findex, 0, 0, 0); \
hypre_IndexD(findex, cdir) = 1; \
}
/* Semicoarsening stride: 2 in the coarsening direction, 1 elsewhere. */
#define hypre_PFMGSetStride(cdir, stride) \
{ \
hypre_SetIndex3(stride, 1, 1, 1); \
hypre_IndexD(stride, cdir) = 2; \
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMGSetup( void *pfmg_vdata,
hypre_StructMatrix *A,
hypre_StructVector *b,
hypre_StructVector *x )
{
hypre_PFMGData *pfmg_data = (hypre_PFMGData *)pfmg_vdata;
MPI_Comm comm = (pfmg_data -> comm);
HYPRE_Int relax_type = (pfmg_data -> relax_type);
HYPRE_Int usr_jacobi_weight= (pfmg_data -> usr_jacobi_weight);
HYPRE_Real jacobi_weight = (pfmg_data -> jacobi_weight);
HYPRE_Int skip_relax = (pfmg_data -> skip_relax);
HYPRE_Real *dxyz = (pfmg_data -> dxyz);
HYPRE_Int rap_type;
HYPRE_Int max_iter;
HYPRE_Int max_levels;
HYPRE_Int num_levels;
hypre_Index cindex;
hypre_Index findex;
hypre_Index stride;
hypre_Index coarsen;
HYPRE_Int *cdir_l;
HYPRE_Int *active_l;
hypre_StructGrid **grid_l;
hypre_StructGrid **P_grid_l;
HYPRE_Real *data;
HYPRE_Int data_size = 0;
HYPRE_Real *relax_weights;
HYPRE_Real *mean, *deviation;
HYPRE_Real alpha, beta;
hypre_StructMatrix **A_l;
hypre_StructMatrix **P_l;
hypre_StructMatrix **RT_l;
hypre_StructVector **b_l;
hypre_StructVector **x_l;
/* temp vectors */
hypre_StructVector **tx_l;
hypre_StructVector **r_l;
hypre_StructVector **e_l;
void **relax_data_l;
void **matvec_data_l;
void **restrict_data_l;
void **interp_data_l;
hypre_StructGrid *grid;
HYPRE_Int ndim;
hypre_Box *cbox;
HYPRE_Real min_dxyz;
HYPRE_Int cdir, periodic, cmaxsize;
HYPRE_Int d, l;
HYPRE_Int dxyz_flag;
HYPRE_Int b_num_ghost[] = {0, 0, 0, 0, 0, 0};
HYPRE_Int x_num_ghost[] = {1, 1, 1, 1, 1, 1};
#if DEBUG
char filename[255];
#endif
HYPRE_ANNOTATION_BEGIN("PFMG.setup");
/*-----------------------------------------------------
* Set up coarse grids
*-----------------------------------------------------*/
grid = hypre_StructMatrixGrid(A);
ndim = hypre_StructGridNDim(grid);
/* Compute a new max_levels value based on the grid */
cbox = hypre_BoxDuplicate(hypre_StructGridBoundingBox(grid));
max_levels = 1;
for (d = 0; d < ndim; d++)
{
max_levels += hypre_Log2(hypre_BoxSizeD(cbox, d)) + 2;
}
if ((pfmg_data -> max_levels) > 0)
{
max_levels = hypre_min(max_levels, (pfmg_data -> max_levels));
}
(pfmg_data -> max_levels) = max_levels;
/* compute dxyz */
dxyz_flag= 0;
if ((dxyz[0] == 0) || (dxyz[1] == 0) || (dxyz[2] == 0))
{
mean = hypre_CTAlloc(HYPRE_Real, 3);
deviation = hypre_CTAlloc(HYPRE_Real, 3);
hypre_PFMGComputeDxyz(A, dxyz, mean, deviation);
for (d = 0; d < ndim; d++)
{
deviation[d] -= mean[d]*mean[d];
/* square of coeff. of variation */
if (deviation[d]/(mean[d]*mean[d]) > .1)
{
dxyz_flag= 1;
break;
}
}
hypre_TFree(mean);
hypre_TFree(deviation);
}
grid_l = hypre_TAlloc(hypre_StructGrid *, max_levels);
hypre_StructGridRef(grid, &grid_l[0]);
P_grid_l = hypre_TAlloc(hypre_StructGrid *, max_levels);
P_grid_l[0] = NULL;
cdir_l = hypre_TAlloc(HYPRE_Int, max_levels);
active_l = hypre_TAlloc(HYPRE_Int, max_levels);
relax_weights = hypre_CTAlloc(HYPRE_Real, max_levels);
hypre_SetIndex3(coarsen, 1, 1, 1); /* forces relaxation on finest grid */
for (l = 0; ; l++)
{
/* determine cdir */
min_dxyz = dxyz[0] + dxyz[1] + dxyz[2] + 1;
cdir = -1;
alpha = 0.0;
for (d = 0; d < ndim; d++)
{
if ((hypre_BoxIMaxD(cbox, d) > hypre_BoxIMinD(cbox, d)) &&
(dxyz[d] < min_dxyz))
{
min_dxyz = dxyz[d];
cdir = d;
}
alpha += 1.0/(dxyz[d]*dxyz[d]);
}
relax_weights[l] = 1.0;
/* If it's possible to coarsen, change relax_weights */
beta = 0.0;
if (cdir != -1)
{
if (dxyz_flag)
{
relax_weights[l] = 2.0/3.0;
}
else
{
for (d = 0; d < ndim; d++)
{
if (d != cdir)
{
beta += 1.0/(dxyz[d]*dxyz[d]);
}
}
if (beta == alpha)
{
alpha = 0.0;
}
else
{
alpha = beta/alpha;
}
/* determine level Jacobi weights */
if (ndim > 1)
{
relax_weights[l] = 2.0/(3.0 - alpha);
}
else
{
relax_weights[l] = 2.0/3.0; /* always 2/3 for 1-d */
}
}
}
if (cdir != -1)
{
/* don't coarsen if a periodic direction and not divisible by 2 */
periodic = hypre_IndexD(hypre_StructGridPeriodic(grid_l[l]), cdir);
if ((periodic) && (periodic % 2))
{
cdir = -1;
}
/* don't coarsen if we've reached max_levels */
if (l == (max_levels - 1))
{
cdir = -1;
}
}
/* stop coarsening */
if (cdir == -1)
{
active_l[l] = 1; /* forces relaxation on coarsest grid */
cmaxsize = 0;
for (d = 0; d < ndim; d++)
{
cmaxsize = hypre_max(cmaxsize, hypre_BoxSizeD(cbox, d));
}
break;
}
cdir_l[l] = cdir;
if (hypre_IndexD(coarsen, cdir) != 0)
{
/* coarsened previously in this direction, relax level l */
active_l[l] = 1;
hypre_SetIndex3(coarsen, 0, 0, 0);
hypre_IndexD(coarsen, cdir) = 1;
}
else
{
active_l[l] = 0;
hypre_IndexD(coarsen, cdir) = 1;
}
/* set cindex, findex, and stride */
hypre_PFMGSetCIndex(cdir, cindex);
hypre_PFMGSetFIndex(cdir, findex);
hypre_PFMGSetStride(cdir, stride);
/* update dxyz and coarsen cbox*/
dxyz[cdir] *= 2;
hypre_ProjectBox(cbox, cindex, stride);
hypre_StructMapFineToCoarse(hypre_BoxIMin(cbox), cindex, stride,
hypre_BoxIMin(cbox));
hypre_StructMapFineToCoarse(hypre_BoxIMax(cbox), cindex, stride,
hypre_BoxIMax(cbox));
/* build the interpolation grid */
hypre_StructCoarsen(grid_l[l], findex, stride, 0, &P_grid_l[l+1]);
/* build the coarse grid */
hypre_StructCoarsen(grid_l[l], cindex, stride, 1, &grid_l[l+1]);
}
num_levels = l + 1;
/* free up some things */
hypre_BoxDestroy(cbox);
/* set all levels active if skip_relax = 0 */
if (!skip_relax)
{
for (l = 0; l < num_levels; l++)
{
active_l[l] = 1;
}
}
(pfmg_data -> num_levels) = num_levels;
(pfmg_data -> cdir_l) = cdir_l;
(pfmg_data -> grid_l) = grid_l;
(pfmg_data -> P_grid_l) = P_grid_l;
/*-----------------------------------------------------
* Set up matrix and vector structures
*-----------------------------------------------------*/
/*-----------------------------------------------------
* Modify the rap_type if red-black Gauss-Seidel is
* used. Red-black gs is used only in the non-Galerkin
* case.
*-----------------------------------------------------*/
if (relax_type == 2 || relax_type == 3) /* red-black gs */
{
(pfmg_data -> rap_type)= 1;
}
rap_type = (pfmg_data -> rap_type);
A_l = hypre_TAlloc(hypre_StructMatrix *, num_levels);
P_l = hypre_TAlloc(hypre_StructMatrix *, num_levels - 1);
RT_l = hypre_TAlloc(hypre_StructMatrix *, num_levels - 1);
b_l = hypre_TAlloc(hypre_StructVector *, num_levels);
x_l = hypre_TAlloc(hypre_StructVector *, num_levels);
tx_l = hypre_TAlloc(hypre_StructVector *, num_levels);
r_l = tx_l;
e_l = tx_l;
A_l[0] = hypre_StructMatrixRef(A);
b_l[0] = hypre_StructVectorRef(b);
x_l[0] = hypre_StructVectorRef(x);
tx_l[0] = hypre_StructVectorCreate(comm, grid_l[0]);
hypre_StructVectorSetNumGhost(tx_l[0], x_num_ghost);
hypre_StructVectorInitializeShell(tx_l[0]);
data_size += hypre_StructVectorDataSize(tx_l[0]);
for (l = 0; l < (num_levels - 1); l++)
{
cdir = cdir_l[l];
P_l[l] = hypre_PFMGCreateInterpOp(A_l[l], P_grid_l[l+1], cdir, rap_type);
hypre_StructMatrixInitializeShell(P_l[l]);
data_size += hypre_StructMatrixDataSize(P_l[l]);
if (hypre_StructMatrixSymmetric(A))
{
RT_l[l] = P_l[l];
}
else
{
RT_l[l] = P_l[l];
#if 0
/* Allow RT != P for non symmetric case */
/* NOTE: Need to create a non-pruned grid for this to work */
RT_l[l] = hypre_PFMGCreateRestrictOp(A_l[l], grid_l[l+1], cdir);
hypre_StructMatrixInitializeShell(RT_l[l]);
data_size += hypre_StructMatrixDataSize(RT_l[l]);
#endif
}
A_l[l+1] = hypre_PFMGCreateRAPOp(RT_l[l], A_l[l], P_l[l],
grid_l[l+1], cdir, rap_type);
hypre_StructMatrixInitializeShell(A_l[l+1]);
data_size += hypre_StructMatrixDataSize(A_l[l+1]);
b_l[l+1] = hypre_StructVectorCreate(comm, grid_l[l+1]);
hypre_StructVectorSetNumGhost(b_l[l+1], b_num_ghost);
hypre_StructVectorInitializeShell(b_l[l+1]);
data_size += hypre_StructVectorDataSize(b_l[l+1]);
x_l[l+1] = hypre_StructVectorCreate(comm, grid_l[l+1]);
hypre_StructVectorSetNumGhost(x_l[l+1], x_num_ghost);
hypre_StructVectorInitializeShell(x_l[l+1]);
data_size += hypre_StructVectorDataSize(x_l[l+1]);
tx_l[l+1] = hypre_StructVectorCreate(comm, grid_l[l+1]);
hypre_StructVectorSetNumGhost(tx_l[l+1], x_num_ghost);
hypre_StructVectorInitializeShell(tx_l[l+1]);
}
data = hypre_SharedCTAlloc(HYPRE_Real, data_size);
(pfmg_data -> data) = data;
hypre_StructVectorInitializeData(tx_l[0], data);
hypre_StructVectorAssemble(tx_l[0]);
data += hypre_StructVectorDataSize(tx_l[0]);
for (l = 0; l < (num_levels - 1); l++)
{
hypre_StructMatrixInitializeData(P_l[l], data);
data += hypre_StructMatrixDataSize(P_l[l]);
#if 0
/* Allow R != PT for non symmetric case */
if (!hypre_StructMatrixSymmetric(A))
{
hypre_StructMatrixInitializeData(RT_l[l], data);
data += hypre_StructMatrixDataSize(RT_l[l]);
}
#endif
hypre_StructMatrixInitializeData(A_l[l+1], data);
data += hypre_StructMatrixDataSize(A_l[l+1]);
hypre_StructVectorInitializeData(b_l[l+1], data);
hypre_StructVectorAssemble(b_l[l+1]);
data += hypre_StructVectorDataSize(b_l[l+1]);
hypre_StructVectorInitializeData(x_l[l+1], data);
hypre_StructVectorAssemble(x_l[l+1]);
data += hypre_StructVectorDataSize(x_l[l+1]);
hypre_StructVectorInitializeData(tx_l[l+1],
hypre_StructVectorData(tx_l[0]));
hypre_StructVectorAssemble(tx_l[l+1]);
}
(pfmg_data -> A_l) = A_l;
(pfmg_data -> P_l) = P_l;
(pfmg_data -> RT_l) = RT_l;
(pfmg_data -> b_l) = b_l;
(pfmg_data -> x_l) = x_l;
(pfmg_data -> tx_l) = tx_l;
(pfmg_data -> r_l) = r_l;
(pfmg_data -> e_l) = e_l;
/*-----------------------------------------------------
* Set up multigrid operators and call setup routines
*-----------------------------------------------------*/
relax_data_l = hypre_TAlloc(void *, num_levels);
matvec_data_l = hypre_TAlloc(void *, num_levels);
restrict_data_l = hypre_TAlloc(void *, num_levels);
interp_data_l = hypre_TAlloc(void *, num_levels);
for (l = 0; l < (num_levels - 1); l++)
{
cdir = cdir_l[l];
hypre_PFMGSetCIndex(cdir, cindex);
hypre_PFMGSetFIndex(cdir, findex);
hypre_PFMGSetStride(cdir, stride);
/* set up interpolation operator */
hypre_PFMGSetupInterpOp(A_l[l], cdir, findex, stride, P_l[l], rap_type);
/* set up the restriction operator */
#if 0
/* Allow R != PT for non symmetric case */
if (!hypre_StructMatrixSymmetric(A))
hypre_PFMGSetupRestrictOp(A_l[l], tx_l[l],
cdir, cindex, stride, RT_l[l]);
#endif
/* set up the coarse grid operator */
hypre_PFMGSetupRAPOp(RT_l[l], A_l[l], P_l[l],
cdir, cindex, stride, rap_type, A_l[l+1]);
/* set up the interpolation routine */
interp_data_l[l] = hypre_SemiInterpCreate();
hypre_SemiInterpSetup(interp_data_l[l], P_l[l], 0, x_l[l+1], e_l[l],
cindex, findex, stride);
/* set up the restriction routine */
restrict_data_l[l] = hypre_SemiRestrictCreate();
hypre_SemiRestrictSetup(restrict_data_l[l], RT_l[l], 1, r_l[l], b_l[l+1],
cindex, findex, stride);
}
/*-----------------------------------------------------
* Check for zero diagonal on coarsest grid, occurs with
* singular problems like full Neumann or full periodic.
* Note that a processor with zero diagonal will set
* active_l =0, other processors will not. This is OK
* as we only want to avoid the division by zero on the
* one processor which owns the single coarse grid
* point.
*-----------------------------------------------------*/
if ( hypre_ZeroDiagonal(A_l[l]))
{
active_l[l] = 0;
}
/* set up fine grid relaxation */
relax_data_l[0] = hypre_PFMGRelaxCreate(comm);
hypre_PFMGRelaxSetTol(relax_data_l[0], 0.0);
if (usr_jacobi_weight)
{
hypre_PFMGRelaxSetJacobiWeight(relax_data_l[0], jacobi_weight);
}
else
{
hypre_PFMGRelaxSetJacobiWeight(relax_data_l[0], relax_weights[0]);
}
hypre_PFMGRelaxSetType(relax_data_l[0], relax_type);
hypre_PFMGRelaxSetTempVec(relax_data_l[0], tx_l[0]);
hypre_PFMGRelaxSetup(relax_data_l[0], A_l[0], b_l[0], x_l[0]);
if (num_levels > 1)
{
for (l = 1; l < num_levels; l++)
{
/* set relaxation parameters */
if (active_l[l])
{
relax_data_l[l] = hypre_PFMGRelaxCreate(comm);
hypre_PFMGRelaxSetTol(relax_data_l[l], 0.0);
if (usr_jacobi_weight)
{
hypre_PFMGRelaxSetJacobiWeight(relax_data_l[l], jacobi_weight);
}
else
{
hypre_PFMGRelaxSetJacobiWeight(relax_data_l[l], relax_weights[l]);
}
hypre_PFMGRelaxSetType(relax_data_l[l], relax_type);
hypre_PFMGRelaxSetTempVec(relax_data_l[l], tx_l[l]);
}
}
/* change coarsest grid relaxation parameters */
l = num_levels - 1;
if (active_l[l])
{
HYPRE_Int maxwork, maxiter;
hypre_PFMGRelaxSetType(relax_data_l[l], 0);
/* do no more work on the coarsest grid than the cost of a V-cycle
* (estimating roughly 4 communications per V-cycle level) */
maxwork = 4*num_levels;
/* do sweeps proportional to the coarsest grid size */
maxiter = hypre_min(maxwork, cmaxsize);
#if 0
hypre_printf("maxwork = %d, cmaxsize = %d, maxiter = %d\n",
maxwork, cmaxsize, maxiter);
#endif
hypre_PFMGRelaxSetMaxIter(relax_data_l[l], maxiter);
}
/* call relax setup */
for (l = 1; l < num_levels; l++)
{
if (active_l[l])
{
hypre_PFMGRelaxSetup(relax_data_l[l], A_l[l], b_l[l], x_l[l]);
}
}
}
hypre_TFree(relax_weights);
for (l = 0; l < num_levels; l++)
{
/* set up the residual routine */
matvec_data_l[l] = hypre_StructMatvecCreate();
hypre_StructMatvecSetup(matvec_data_l[l], A_l[l], x_l[l]);
}
(pfmg_data -> active_l) = active_l;
(pfmg_data -> relax_data_l) = relax_data_l;
(pfmg_data -> matvec_data_l) = matvec_data_l;
(pfmg_data -> restrict_data_l) = restrict_data_l;
(pfmg_data -> interp_data_l) = interp_data_l;
/*-----------------------------------------------------
* Allocate space for log info
*-----------------------------------------------------*/
if ((pfmg_data -> logging) > 0)
{
max_iter = (pfmg_data -> max_iter);
(pfmg_data -> norms) = hypre_TAlloc(HYPRE_Real, max_iter);
(pfmg_data -> rel_norms) = hypre_TAlloc(HYPRE_Real, max_iter);
}
#if DEBUG
for (l = 0; l < (num_levels - 1); l++)
{
hypre_sprintf(filename, "zout_A.%02d", l);
hypre_StructMatrixPrint(filename, A_l[l], 0);
hypre_sprintf(filename, "zout_P.%02d", l);
hypre_StructMatrixPrint(filename, P_l[l], 0);
}
hypre_sprintf(filename, "zout_A.%02d", l);
hypre_StructMatrixPrint(filename, A_l[l], 0);
#endif
HYPRE_ANNOTATION_END("PFMG.setup");
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMGComputeDxyz( hypre_StructMatrix *A,
HYPRE_Real *dxyz,
HYPRE_Real *mean,
HYPRE_Real *deviation)
{
hypre_BoxArray *compute_boxes;
hypre_Box *compute_box;
hypre_Box *A_dbox;
HYPRE_Int Ai;
HYPRE_Real *Ap;
HYPRE_Real cxyz[3], sqcxyz[3], tcxyz[3];
HYPRE_Real cxyz_max;
HYPRE_Int tot_size;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
HYPRE_Int constant_coefficient;
HYPRE_Int Astenc;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index stride;
HYPRE_Int i, si, d, sdiag;
HYPRE_Real cx, cy, cz, sqcx, sqcy, sqcz, tcx, tcy, tcz, diag;
/*----------------------------------------------------------
* Initialize some things
*----------------------------------------------------------*/
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
hypre_SetIndex3(stride, 1, 1, 1);
/*----------------------------------------------------------
* Compute cxyz (use arithmetic mean)
*----------------------------------------------------------*/
cx = 0.0;
cy = 0.0;
cz = 0.0;
sqcx = 0.0;
sqcy = 0.0;
sqcz = 0.0;
constant_coefficient = hypre_StructMatrixConstantCoefficient(A);
compute_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(A));
tot_size= hypre_StructGridGlobalSize(hypre_StructMatrixGrid(A));
/* find diagonal stencil entry */
for (si = 0; si < stencil_size; si++)
{
if ((hypre_IndexD(stencil_shape[si], 0) == 0) &&
(hypre_IndexD(stencil_shape[si], 1) == 0) &&
(hypre_IndexD(stencil_shape[si], 2) == 0))
{
sdiag = si;
break;
}
}
hypre_ForBoxI(i, compute_boxes)
{
compute_box = hypre_BoxArrayBox(compute_boxes, i);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
start = hypre_BoxIMin(compute_box);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
/* all coefficients constant or variable diagonal */
if ( constant_coefficient )
{
Ai = hypre_CCBoxIndexRank( A_dbox, start );
tcx = 0.0;
tcy = 0.0;
tcz = 0.0;
/* get sign of diagonal */
Ap = hypre_StructMatrixBoxData(A, i, sdiag);
diag = 1.0;
if (Ap[Ai] < 0)
{
diag = -1.0;
}
for (si = 0; si < stencil_size; si++)
{
Ap = hypre_StructMatrixBoxData(A, i, si);
/* x-direction */
Astenc = hypre_IndexD(stencil_shape[si], 0);
if (Astenc)
{
tcx -= Ap[Ai]*diag;
}
/* y-direction */
Astenc = hypre_IndexD(stencil_shape[si], 1);
if (Astenc)
{
tcy -= Ap[Ai]*diag;
}
/* z-direction */
Astenc = hypre_IndexD(stencil_shape[si], 2);
if (Astenc)
{
tcz -= Ap[Ai]*diag;
}
}
cx += tcx;
cy += tcy;
cz += tcz;
sqcx += (tcx*tcx);
sqcy += (tcy*tcy);
sqcz += (tcz*tcz);
}
/* constant_coefficient==0, all coefficients vary with space */
else
{
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(A), loop_size,
A_dbox, start, stride, Ai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,si,Ap,diag,Astenc,tcx,tcy,tcz) reduction(+:cx,cy,cz,sqcx,sqcy,sqcz) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(Ai)
{
tcx = 0.0;
tcy = 0.0;
tcz = 0.0;
/* get sign of diagonal */
Ap = hypre_StructMatrixBoxData(A, i, sdiag);
diag = 1.0;
if (Ap[Ai] < 0)
{
diag = -1.0;
}
for (si = 0; si < stencil_size; si++)
{
Ap = hypre_StructMatrixBoxData(A, i, si);
/* x-direction */
Astenc = hypre_IndexD(stencil_shape[si], 0);
if (Astenc)
{
tcx -= Ap[Ai]*diag;
}
/* y-direction */
Astenc = hypre_IndexD(stencil_shape[si], 1);
if (Astenc)
{
tcy -= Ap[Ai]*diag;
}
/* z-direction */
Astenc = hypre_IndexD(stencil_shape[si], 2);
if (Astenc)
{
tcz -= Ap[Ai]*diag;
}
}
cx += tcx;
cy += tcy;
cz += tcz;
sqcx += (tcx*tcx);
sqcy += (tcy*tcy);
sqcz += (tcz*tcz);
}
hypre_BoxLoop1End(Ai);
}
}
cxyz[0] = cx;
cxyz[1] = cy;
cxyz[2] = cz;
sqcxyz[0] = sqcx;
sqcxyz[1] = sqcy;
sqcxyz[2] = sqcz;
/*----------------------------------------------------------
* Compute dxyz
*----------------------------------------------------------*/
/* all coefficients constant or variable diagonal */
if ( constant_coefficient )
{
for (d= 0; d< 3; d++)
{
mean[d]= cxyz[d];
deviation[d]= sqcxyz[d];
}
}
/* constant_coefficient==0, all coefficients vary with space */
else
{
tcxyz[0] = cxyz[0];
tcxyz[1] = cxyz[1];
tcxyz[2] = cxyz[2];
hypre_MPI_Allreduce(tcxyz, cxyz, 3, HYPRE_MPI_REAL, hypre_MPI_SUM,
hypre_StructMatrixComm(A));
tcxyz[0] = sqcxyz[0];
tcxyz[1] = sqcxyz[1];
tcxyz[2] = sqcxyz[2];
hypre_MPI_Allreduce(tcxyz, sqcxyz, 3, HYPRE_MPI_REAL, hypre_MPI_SUM,
hypre_StructMatrixComm(A));
for (d= 0; d< 3; d++)
{
mean[d]= cxyz[d]/tot_size;
deviation[d]= sqcxyz[d]/tot_size;
}
}
cxyz_max = 0.0;
for (d = 0; d < 3; d++)
{
cxyz_max = hypre_max(cxyz_max, cxyz[d]);
}
if (cxyz_max == 0.0)
{
cxyz_max = 1.0;
}
for (d = 0; d < 3; d++)
{
if (cxyz[d] > 0)
{
cxyz[d] /= cxyz_max;
dxyz[d] = sqrt(1.0 / cxyz[d]);
}
else
{
dxyz[d] = 1.0e+123;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Returns 1 if there is a diagonal coefficient that is zero,
* otherwise returns 0.
*--------------------------------------------------------------------------*/
/* Returns 1 if this processor owns a zero diagonal coefficient of A,
 * otherwise 0.  The test is performed by multiplying all local diagonal
 * entries together and checking the product for zero.
 *
 * NOTE(review): the floating-point product of many small (or large)
 * diagonal values can underflow to 0 (or overflow) and give a false
 * positive -- presumably coarse grids here are small enough that this is
 * not an issue; TODO confirm. */
HYPRE_Int
hypre_ZeroDiagonal( hypre_StructMatrix *A )
{
   hypre_BoxArray       *compute_boxes;
   hypre_Box            *compute_box;
   hypre_Index           loop_size;
   hypre_IndexRef        start;
   hypre_Index           stride;
   HYPRE_Real           *Ap;
   hypre_Box            *A_dbox;
   HYPRE_Int             Ai;
   HYPRE_Int             i;
   hypre_Index           diag_index;
   HYPRE_Real            diag_product = 1.0;
   HYPRE_Int             zero_diag = 0;
   HYPRE_Int             constant_coefficient;
   /*----------------------------------------------------------
    * Initialize some things
    *----------------------------------------------------------*/
   hypre_SetIndex3(stride, 1, 1, 1);
   hypre_SetIndex3(diag_index, 0, 0, 0);
   /* Need to modify here */
   constant_coefficient = hypre_StructMatrixConstantCoefficient(A);
   compute_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(A));
   hypre_ForBoxI(i, compute_boxes)
   {
      compute_box = hypre_BoxArrayBox(compute_boxes, i);
      start = hypre_BoxIMin(compute_box);
      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
      /* pointer to this box's diagonal (offset (0,0,0)) coefficients */
      Ap = hypre_StructMatrixExtractPointerByIndex(A, i, diag_index);
      hypre_BoxGetStrideSize(compute_box, stride, loop_size);
      if ( constant_coefficient )
      {
         /* a single diagonal value represents the whole box */
         Ai = hypre_CCBoxIndexRank( A_dbox, start );
         diag_product *= Ap[Ai];
      }
      else
      {
         hypre_BoxLoop1Begin(hypre_StructMatrixNDim(A), loop_size,
                             A_dbox, start, stride, Ai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai) reduction(*:diag_product) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(Ai)
         {
            diag_product *= Ap[Ai];
         }
         hypre_BoxLoop1End(Ai);
      }
   }
   if (diag_product == 0)
   {
      zero_diag = 1;
   }
   return zero_diag;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Offloading test for the OpenMP "master" construct.
 *
 * Sweeps the requested thread count t from 0 to 224.  Each trial runs a
 * parallel region in which:
 *   - a nowait static worksharing loop computes B[i] = D[i] - E[i] = 2*i
 *   - the master thread alone computes A[i] = 1 + C[i] + D[i] = i + 2
 *   - after an explicit barrier, a second loop folds A into B
 * Expected result verified by VERIFY: B[i] == 3*i + 2.
 * The explicit barrier is required: the first loop is nowait and the
 * master construct has no implied barrier, so without it the final loop
 * could read A before the master thread has written it. */
int main(void) {
  check_offloading();
  double A[N], B[N], C[N], D[N], E[N];
  INIT();   /* C[i] = 1, D[i] = i, E[i] = -i (see the INIT macro above) */
  //
  // Test: Master.
  //
  for (int t = 0; t <= 224; t++) {
    int threads[1]; threads[0] = t;
    TEST({
      /* reset inputs so each trial is independent */
      for (int i = 0; i < N; i++) {
        A[i] = 1;
        B[i] = 0;
      }
      /* _Pragma is used because #pragma cannot appear inside the TEST
       * macro argument */
      _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
      {
        _Pragma("omp for nowait schedule(static,1)")
        for (int i = 0; i < N; i++) {
          B[i] = D[i] - E[i];
        }
        _Pragma("omp master")
        {
          for (int i = 0; i < N; i++) {
            A[i] += C[i] + D[i];
          }
        }
        _Pragma("omp barrier")
        _Pragma("omp for schedule(static,1)")
        for (int i = 0; i < N; i++) {
          B[i] += A[i];
        }
      }
    }, VERIFY(0, N, B[i], 3*i+2));
  }
  return 0;
}
|
GB_binop__eq_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_uint32
// A.*B function (eWiseMult): GB_AemultB__eq_uint32
// A*D function (colscale): GB_AxD__eq_uint32
// D*A function (rowscale): GB_DxB__eq_uint32
// C+=B function (dense accum): GB_Cdense_accumB__eq_uint32
// C+=b function (dense accum): GB_Cdense_accumb__eq_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_uint32
// C=scalar+B GB_bind1st__eq_uint32
// C=scalar+B' GB_bind1st_tran__eq_uint32
// C=A+scalar GB_bind2nd__eq_uint32
// C=A'+scalar GB_bind2nd_tran__eq_uint32
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; Cx [p] = (Ax [p] == Bx [p]).
// The numeric loop is supplied by the template include below, driven by the
// GB_BINOP/GB_GETA/GB_GETB macros defined at the top of this file.
GrB_Info GB_Cdense_ewise3_noaccum__eq_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For this
// operator the subassign template is compiled out (#if 0), so the function
// is effectively a no-op that reports success.  The slice arrays describe
// the parallel partition of B's entries across tasks.
GrB_Info GB_Cdense_accumB__eq_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  For this operator the
// subassign template is compiled out (#if 0), so the function is effectively
// a no-op that reports success.
GrB_Info GB_Cdense_accumb__eq_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with
// Cx [p] = (Ax [p] == Dx [j]).  The loop is supplied by the colscale
// template; the slice arrays partition A's entries across ntasks tasks.
GrB_Info GB_AxD__eq_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C's values are written in place through Cx (C type is bool)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// Cx [p] = (Dx [i] == Bx [p]).  The loop is supplied by the rowscale
// template.
GrB_Info GB_DxB__eq_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C's values are written in place through Cx (C type is bool)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, applying z = (x == y) where the patterns
// of A and B intersect and copying the single present value elsewhere.
// The ek-slice workspace pointers are allocated inside the template and
// released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__eq_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slices, allocated by the template and freed below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying z = (x == y) on the
// intersection of the patterns of A and B.  The ek-slice workspace pointers
// are allocated inside the template and released by GB_FREE_ALL.
GrB_Info GB_AemultB__eq_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slices, allocated by the template and freed below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x == bij) to every stored entry of B, with the scalar x bound
// as the first operand.  Bb is B's bitmap (entry p is present iff GBB(Bb,p));
// anz is the number of entries; nthreads the thread count.  Cx and Bx may
// alias each other.
GrB_Info GB_bind1st__eq_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const uint32_t x = (*((uint32_t *) x_input)) ;
    const uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            const uint32_t bij = Bx [p] ;
            Cx [p] = (x == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij == y) to every stored entry of A, with the scalar y bound
// as the second operand.  Ab is A's bitmap (entry p is present iff
// GBB(Ab,p)); anz is the number of entries.  Cx and Ax may alias each other.
GrB_Info GB_bind2nd__eq_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const uint32_t *Ax = (uint32_t *) Ax_input ;
    const uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            const uint32_t aij = Ax [p] ;
            Cx [p] = (aij == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij) via the
// GB_CAST_OP macro defined just above.  GB_ATYPE is temporarily redefined
// because the transpose template reads A as the second operand; the
// trailing redefinition after #endif restores it (auto-generated code).
GrB_Info GB_bind1st_tran__eq_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y) via the
// GB_CAST_OP macro defined just above; the loop itself comes from the
// transpose template include.
GrB_Info GB_bind2nd_tran__eq_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
J1OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include "Utilities/qmc_common.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for one-body Jastrow function using multiple functors
*/
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
///alias FuncType
using FuncType = FT;
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using DistRow = DistanceTableData::DistRow;
using DisplRow = DistanceTableData::DisplRow;
///table index
const int myTableID;
///number of ions
int Nions;
///number of electrons
int Nelec;
///number of groups
int NumGroups;
///reference to the sources (ions)
const ParticleSet& Ions;
valT curAt;
valT curLap;
posT curGrad;
///\f$Vat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Vat;
aligned_vector<valT> U, dU, d2U, d3U;
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
Vector<posT> Grad;
Vector<valT> Lap;
///Container for \f$F[ig*NumGroups+jg]\f$
std::vector<FT*> F;
/** Construct a one-body Jastrow bound to an ion set and an electron set.
 * @param obj_name unique non-empty name for this component
 * @param ions source particle set (ions); an els->ions distance table is
 *             registered via els.addTable(ions)
 * @param els  target particle set (electrons)
 * @throw std::runtime_error if obj_name is empty
 */
J1OrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& els)
    : WaveFunctionComponent("J1OrbitalSoA", obj_name), myTableID(els.addTable(ions)), Ions(ions)
{
  if (myName.empty())
    throw std::runtime_error("J1OrbitalSoA object name cannot be empty!");
  initialize(els);
}

///not copyable: F holds owning raw pointers freed in the destructor
J1OrbitalSoA(const J1OrbitalSoA& rhs) = delete;
///release the per-species radial functors owned through F
~J1OrbitalSoA()
{
  // deleting a null pointer is a no-op, so no guard is needed
  for (auto* func : F)
    delete func;
}
/* initialize storage */
/* initialize storage
 * Sizes all per-ion and per-electron work arrays from the ion and electron
 * counts, and decides between the grouped (per-species, vectorized) and
 * ungrouped (per-ion) evaluation paths.
 */
void initialize(const ParticleSet& els)
{
  Nions = Ions.getTotalNum();
  NumGroups = Ions.getSpeciesSet().getTotalNum();
  // at least 4 functor slots are allocated even for fewer species
  F.resize(std::max(NumGroups, 4), nullptr);
  // if ions of a species are not stored contiguously, fall back to the
  // per-ion path by flagging NumGroups = 0
  if (NumGroups > 1 && !Ions.IsGrouped)
  {
    NumGroups = 0;
  }
  Nelec = els.getTotalNum();
  // per-electron accumulators: value, gradient, laplacian
  Vat.resize(Nelec);
  Grad.resize(Nelec);
  Lap.resize(Nelec);
  // per-ion scratch: u, du, d2u, d3u and compressed-distance buffers
  U.resize(Nions);
  dU.resize(Nions);
  d2U.resize(Nions);
  d3U.resize(Nions);
  DistCompressed.resize(Nions);
  DistIndice.resize(Nions);
}
/** Attach (or replace) the radial functor for an ion species.
 * Takes ownership of afunc; any functor previously registered for this
 * species is freed.  target_type is accepted for interface compatibility
 * and not used here.
 */
void addFunc(int source_type, FT* afunc, int target_type = -1)
{
  // delete of a null pointer is a no-op, so no guard is required
  delete F[source_type];
  F[source_type] = afunc;
}
/** Recompute Vat, Grad and Lap for every electron from scratch.
 * @param P quantum particleset providing the electron-ion distance table
 */
void recompute(ParticleSet& P)
{
  const DistanceTableData& d_ie(P.getDistTable(myTableID));
  for (int iat = 0; iat < Nelec; ++iat)
  {
    // fill U, dU, d2U for electron iat, then reduce them
    computeU3(P, iat, d_ie.getDistRow(iat));
    Vat[iat] = simd::accumulate_n(U.data(), Nions, valT());
    Lap[iat] = accumulateGL(dU.data(), d2U.data(), d_ie.getDisplRow(iat), Grad[iat]);
  }
}
/** Evaluate log(J1) from scratch and accumulate G and L.
 * Delegates to evaluateGL with fromscratch = true.
 */
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
{
  return evaluateGL(P, G, L, true);
}
/** Accumulate the Hessian of log(J1) for every electron.
 * @param P quantum particleset
 * @param grad_grad_psi per-electron 3x3 tensors; zeroed here, then each
 *        ion's contribution is subtracted (log(J1) = -sum_u, see evaluateGL)
 */
void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  const DistanceTableData& d_ie(P.getDistTable(myTableID));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);
  for (int iel = 0; iel < Nelec; ++iel)
  {
    const auto& dist  = d_ie.getDistRow(iel);
    const auto& displ = d_ie.getDisplRow(iel);
    for (int iat = 0; iat < Nions; iat++)
    {
      int gid    = Ions.GroupID[iat];
      auto* func = F[gid];
      if (func != nullptr)
      {
        RealType r    = dist[iat];
        RealType rinv = 1.0 / r;
        PosType dr    = displ[iat];
        func->evaluate(r, dudr, d2udr2);
        // Hessian of u(r): rr^T/r^2 * (u'' - u'/r) + I * u'/r;
        // subtracted because the log value carries a minus sign
        grad_grad_psi[iel] -= rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      }
    }
  }
}
/** Wavefunction ratio for a proposed move of electron iat.
 * Uses the temporary distances of the proposed position; returns
 * exp(Vat[iat] - curAt), i.e. J1(new)/J1(old).
 */
PsiValueType ratio(ParticleSet& P, int iat)
{
  UpdateMode = ORB_PBYP_RATIO;
  curAt = computeU(P.getDistTable(myTableID).getTempDists());
  return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
}
/** Ratios for a set of virtual (trial) positions of one electron.
 * @param VP virtual particle set; VP.refPtcl is the electron being moved
 * @param ratios output, ratios[k] = exp(Vat[ref] - U(k-th virtual position))
 */
inline void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
  for (int k = 0; k < ratios.size(); ++k)
    ratios[k] = std::exp(Vat[VP.refPtcl] - computeU(VP.getDistTable(myTableID).getDistRow(k)));
}
/** Sum u(r) over all ions for one electron's distance row.
 * @param dist distances from one electron to every ion
 * @return sum of the radial functor values over all ions
 */
inline valT computeU(const DistRow& dist)
{
  valT u_sum(0);
  if (NumGroups > 0)
  {
    // species stored contiguously: use the vectorized per-group path
    for (int jg = 0; jg < NumGroups; ++jg)
      if (F[jg] != nullptr)
        u_sum += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
  }
  else
  {
    // ungrouped: dispatch to each ion's functor individually
    for (int iat = 0; iat < Nions; ++iat)
    {
      const int gid = Ions.GroupID[iat];
      if (F[gid] != nullptr)
        u_sum += F[gid]->evaluate(dist[iat]);
    }
  }
  return u_sum;
}
/** Ratios of moving each electron to the single temporary position.
 * Computes curAt = U(temp position) once, then
 * ratios[i] = exp(Vat[i] - curAt) for every electron i.
 */
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& dist = P.getDistTable(myTableID).getTempDists();
  curAt = valT(0);
  if (NumGroups > 0)
  {
    // grouped species: vectorized per-group evaluation
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      if (F[jg] != nullptr)
        curAt += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
    }
  }
  else
  {
    // ungrouped: per-ion evaluation
    for (int c = 0; c < Nions; ++c)
    {
      int gid = Ions.GroupID[c];
      if (F[gid] != nullptr)
        curAt += F[gid]->evaluate(dist[c]);
    }
  }
  for (int i = 0; i < Nelec; ++i)
    ratios[i] = std::exp(Vat[i] - curAt);
}
/** Accumulate gradient and laplacian and return the log value.
 * @param P quantum particleset
 * @param G per-electron gradient, incremented by Grad
 * @param L per-electron laplacian, decremented by Lap
 * @param fromscratch when true, recompute Vat/Grad/Lap first
 * @return LogValue = -sum_i Vat[i]
 */
inline LogValueType evaluateGL(ParticleSet& P,
                               ParticleSet::ParticleGradient_t& G,
                               ParticleSet::ParticleLaplacian_t& L,
                               bool fromscratch = false)
{
  if (fromscratch)
    recompute(P);
  for (size_t iat = 0; iat < Nelec; ++iat)
    G[iat] += Grad[iat];
  for (size_t iat = 0; iat < Nelec; ++iat)
    L[iat] -= Lap[iat];
  return LogValue = -simd::accumulate_n(Vat.data(), Nelec, valT());
}
/** compute gradient and lap
 * @param du    per-ion du/dr divided by r (see computeU3)
 * @param d2u   per-ion d2u/dr2
 * @param displ per-ion displacements (SoA layout, one array per dimension)
 * @param grad  output gradient, grad[d] = sum_j du[j] * displ_d[j]
 * @return lap = sum_j (d2u[j] + (DIM-1) * du[j])
 */
inline valT accumulateGL(const valT* restrict du, const valT* restrict d2u, const DisplRow& displ, posT& grad) const
{
  valT lap(0);
  // (DIM-1)/r term of the radial laplacian; du is already du/dr / r
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
  //#pragma omp simd reduction(+:lap)
  for (int jat = 0; jat < Nions; ++jat)
    lap += d2u[jat] + lapfac * du[jat];
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict dX = displ.data(idim);
    valT s = valT();
    //#pragma omp simd reduction(+:s)
    for (int jat = 0; jat < Nions; ++jat)
      s += du[jat] * dX[jat];
    grad[idim] = s;
  }
  return lap;
}
/** compute U, dU and d2U
 * @param P quantum particleset
 * @param iat the moving particle
 * @param dist starting address of the distances of the ions wrt the iat-th particle
 *
 * Fills the per-ion tables U (value), dU (first derivative, stored divided by r
 * in the ungrouped branch; presumably the same in evaluateVGL — confirm) and
 * d2U (second derivative).
 */
inline void computeU3(ParticleSet& P, int iat, const DistRow& dist)
{
  if (NumGroups > 0)
  { //ions are grouped
    constexpr valT czero(0);
    // clear the tables: evaluateVGL only writes the [first,last) slice per group
    std::fill_n(U.data(), Nions, czero);
    std::fill_n(dU.data(), Nions, czero);
    std::fill_n(d2U.data(), Nions, czero);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      if (F[jg] == nullptr)
        continue;
      F[jg]->evaluateVGL(-1, Ions.first(jg), Ions.last(jg), dist.data(), U.data(), dU.data(), d2U.data(),
                         DistCompressed.data(), DistIndice.data());
    }
  }
  else
  {
    for (int c = 0; c < Nions; ++c)
    {
      int gid = Ions.GroupID[c];
      if (F[gid] != nullptr)
      {
        U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
        // store dU/r so it combines directly with displacement vectors
        dU[c] /= dist[c];
      }
    }
  }
}
/** Return the cached gradient of particle iat.
 * @param P quantum particleset (unused; Grad is kept current by acceptMove/recompute)
 * @param iat particle index
 */
GradType evalGrad(ParticleSet& P, int iat)
{
  const posT& cached = Grad[iat];
  return GradType(cached);
}
/** compute the ratio and gradient during particle-by-particle update
 * @param P quantum particleset
 * @param iat particle index
 * @param grad_iat accumulated gradient for the proposed move
 * @return exp(Vat[iat] - curAt), the wavefunction ratio for the move
 *
 * Uses getTempDists()/getTempDispls() for the proposed position. Side effects:
 * curAt, curGrad and curLap are cached for a subsequent acceptMove().
 */
PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;
  // fill U, dU, d2U at the proposed position, then reduce them
  computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
  curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
  curAt  = simd::accumulate_n(U.data(), Nions, valT());
  grad_iat += curGrad;
  return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
}
/** Rejected move. Nothing to do: the scratch values (curAt, curGrad, curLap)
 *  computed by ratio()/ratioGrad() are simply discarded. */
inline void restore(int iat) {}
/** Accepted move. Update Vat[iat], Grad[iat] and Lap[iat] from the cached
 *  curAt/curGrad/curLap, recomputing them first if only ratio() was called. */
void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false)
{
  if (UpdateMode == ORB_PBYP_RATIO)
  {
    // ratio() does not fill dU/d2U; compute them now for the accepted position
    computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
    curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
  }
  // LogValue = -sum(Vat), so the delta is the old-minus-new value at iat
  LogValue += Vat[iat] - curAt;
  Vat[iat]  = curAt;
  Grad[iat] = curGrad;
  Lap[iat]  = curLap;
}
/** Register Vat, Grad and Lap in the walker buffer.
 *
 * On first call the arrays are copied into the buffer, their byte extent is
 * recorded in Bytes_in_WFBuffer, and the local storage is released (the data
 * lives in the buffer from then on, see copyFromBuffer). Subsequent calls only
 * advance the buffer cursor past this component's region.
 */
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
  if (Bytes_in_WFBuffer == 0)
  {
    Bytes_in_WFBuffer = buf.current();
    buf.add(Vat.begin(), Vat.end());
    buf.add(Grad.begin(), Grad.end());
    buf.add(Lap.begin(), Lap.end());
    // extent = cursor after adds minus cursor before
    Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
    // free local space
    Vat.free();
    Grad.free();
    Lap.free();
  }
  else
  {
    buf.forward(Bytes_in_WFBuffer);
  }
}
/** Recompute G/L contributions and advance the buffer cursor.
 * NOTE(review): the fromscratch argument is ignored — evaluateGL is always
 * called with false; confirm this is intended.
 */
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
  evaluateGL(P, P.G, P.L, false);
  buf.forward(Bytes_in_WFBuffer);
  return LogValue;
}
/** Re-attach Vat, Grad and Lap to this walker's region of the buffer.
 *  No copy is made: the arrays alias the buffer storage (see registerData). */
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
  Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
  Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
}
/** Deep-copy this component for the target particleset tqp.
 *  Radial functors and (if present) the parameter-derivative helper are cloned. */
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
{
  auto* clone        = new J1OrbitalSoA<FT>(myName, Ions, tqp);
  clone->Optimizable = Optimizable;
  const size_t nfunc = F.size();
  for (size_t ig = 0; ig < nfunc; ++ig)
    if (F[ig] != nullptr)
      clone->addFunc(ig, new FT(*F[ig]));
  if (dPsi)
    clone->dPsi = dPsi->makeClone(tqp);
  return clone;
}
/**@{ WaveFunctionComponent virtual functions that are not essential for the development */
/** Print the optimizable variables of every attached radial functor. */
void reportStatus(std::ostream& os)
{
  for (size_t ig = 0; ig < F.size(); ++ig)
  {
    if (F[ig] == nullptr)
      continue;
    F[ig]->myVars.print(os);
  }
}
/** Register each functor's optimizable parameters with the global set and
 *  with this component's local myVars (cleared first). */
void checkInVariables(opt_variables_type& active)
{
  myVars.clear();
  const size_t nfunc = F.size();
  for (size_t ig = 0; ig < nfunc; ++ig)
  {
    if (F[ig] == nullptr)
      continue;
    F[ig]->checkInVariables(active);
    F[ig]->checkInVariables(myVars);
  }
}
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable = myVars.is_optimizable();
for (size_t i = 0, n = F.size(); i < n; ++i)
if (F[i] != nullptr)
F[i]->checkOutVariables(active);
if (dPsi)
dPsi->checkOutVariables(active);
}
/** Push new parameter values from the active optimizer set into the functors
 *  and mirror them into the local myVars. No-op when not optimizable. */
void resetParameters(const opt_variables_type& active)
{
  if (!Optimizable)
    return;
  for (size_t i = 0, n = F.size(); i < n; ++i)
    if (F[i] != nullptr)
      F[i]->resetParameters(active);
  // keep the local copy in sync; Index[i] < 0 marks variables not in the active set
  for (int i = 0; i < myVars.size(); ++i)
  {
    int ii = myVars.Index[i];
    if (ii >= 0)
      myVars[i] = active[ii];
  }
  if (dPsi)
    dPsi->resetParameters(active);
}
/**@} */
/** Gradient of the log value with respect to the position of source ion isrc.
 * @param P quantum particleset
 * @param source ion particleset (provides the group id of isrc)
 * @param isrc index of the ion being differentiated
 * @return accumulated gradient over all electrons
 *
 * NOTE(review): rinv = 1/r is formed without a zero-distance guard — assumes
 * electron-ion distances are strictly positive; confirm upstream guarantees.
 */
inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
{
  GradType g_return(0.0);
  const DistanceTableData& d_ie(P.getDistTable(myTableID));
  for (int iat = 0; iat < Nelec; ++iat)
  {
    const auto& dist  = d_ie.getDistRow(iat);
    const auto& displ = d_ie.getDisplRow(iat);
    int gid           = source.GroupID[isrc];
    RealType r        = dist[isrc];
    RealType rinv     = 1.0 / r;
    PosType dr        = displ[isrc];
    if (F[gid] != nullptr)
    {
      // also refreshes the U/dU/d2U/d3U entries for isrc as a side effect
      U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
      g_return -= dU[isrc] * rinv * dr;
    }
  }
  return g_return;
}
/** Gradient of the log value w.r.t. source ion isrc, plus the gradients of the
 *  electron gradient and laplacian (for force evaluation).
 * @param grad_grad per-dimension gradients of the electron gradients (accumulated)
 * @param lapl_grad per-dimension gradients of the electron laplacians (accumulated)
 * @return accumulated gradient over all electrons
 *
 * Aborts if the functor for the ion's group is missing, unlike the simpler
 * overload above which silently skips it.
 */
inline GradType evalGradSource(ParticleSet& P,
                               ParticleSet& source,
                               int isrc,
                               TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                               TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
  GradType g_return(0.0);
  const DistanceTableData& d_ie(P.getDistTable(myTableID));
  for (int iat = 0; iat < Nelec; ++iat)
  {
    const auto& dist  = d_ie.getDistRow(iat);
    const auto& displ = d_ie.getDisplRow(iat);
    int gid           = source.GroupID[isrc];
    RealType r        = dist[isrc];
    RealType rinv     = 1.0 / r;
    PosType dr        = displ[isrc];
    if (F[gid] != nullptr)
    {
      U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
    }
    else
    {
      APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
    }
    g_return -= dU[isrc] * rinv * dr;
    //The following terms depend only on the radial component r. Thus,
    //we compute them and mix with position vectors to acquire the full
    //cartesian vector objects.
    valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
    valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;
    for (int idim = 0; idim < OHMMS_DIM; idim++)
    {
      grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
      grad_grad[idim][iat][idim] += rinv * dU[isrc];
      lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
    }
  }
  return g_return;
}
};
} // namespace qmcplusplus
#endif
|
VerletClusterLists.h | /**
* @file VerletClusterLists.h
* @author nguyen
* @date 14.10.18
*/
#pragma once
#include <cmath>
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/ParticleContainer.h"
#include "autopas/containers/verletClusterLists/VerletClusterMaths.h"
#include "autopas/containers/verletClusterLists/traversals/VerletClustersTraversalInterface.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/inBox.h"
namespace autopas {
/**
* Particles are divided into clusters.
* The VerletClusterLists class uses neighborhood lists for each cluster
* to calculate pairwise interactions of particles.
* It is optimized for a constant, i.e. particle independent, cutoff radius of
* the interaction.
* @tparam Particle
*/
template <class Particle>
class VerletClusterLists : public ParticleContainer<FullParticleCell<Particle>> {
/**
* the index type to access the particle cells
*/
using index_t = VerletClusterMaths::index_t;
public:
/**
 * Constructor of the VerletClusterLists class.
 * The neighbor lists are built using an estimated density.
 * The box is divided into cuboids with roughly the same side length.
 * An initial (empty) rebuild without newton 3 is performed immediately.
 * @param boxMin the lower corner of the domain
 * @param boxMax the upper corner of the domain
 * @param cutoff the cutoff radius of the interaction
 * @param skin the skin radius
 * @param clusterSize number of particles per (padded) cluster
 */
VerletClusterLists(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff,
                   double skin = 0, int clusterSize = 4)
    : ParticleContainer<FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin),
      _clusterSize(clusterSize),
      _numClusters(0),
      _boxMin(boxMin),
      _boxMax(boxMax),
      _skin(skin),
      _cutoff(cutoff),
      _neighborListIsNewton3(false),
      _interactionLengthSqr((cutoff + skin) * (cutoff + skin)) {
  rebuild(false);
}
/** @return ContainerOption::verletClusterLists, identifying this container type. */
ContainerOption getContainerType() const override { return ContainerOption::verletClusterLists; }
/** Execute one pairwise interaction pass with the given traversal.
 *  The traversal must implement VerletClustersTraversalInterface; otherwise an
 *  exception is raised. init/traverse/end are still invoked afterwards. */
void iteratePairwise(TraversalInterface *traversal) override {
  AutoPasLog(debug, "Using traversal {}.", traversal->getTraversalType().to_string());
  // hand this container to the traversal if it is of the matching type
  auto *traversalInterface = dynamic_cast<VerletClustersTraversalInterface<Particle> *>(traversal);
  if (traversalInterface) {
    traversalInterface->setClusterLists(*this);
  } else {
    autopas::utils::ExceptionHandler::exception(
        "Trying to use a traversal of wrong type in VerletClusterLists::iteratePairwise. TraversalID: {}",
        traversal->getTraversalType());
  }
  traversal->initTraversal();
  traversal->traverseParticlePairs();
  traversal->endTraversal();
}
/**
 * @copydoc VerletLists::addParticle()
 */
void addParticle(const Particle &p) override {
  // add particle to cell 0 regardless of position, because lists will be rebuilt anyways
  this->_cells[0].addParticle(p);
}
/**
 * @copydoc VerletLists::addHaloParticle()
 * @note Not implemented: always raises an exception.
 */
void addHaloParticle(const Particle &haloParticle) override {
  autopas::utils::ExceptionHandler::exception("VerletClusterLists.addHaloParticle not yet implemented.");
}
/**
 * @copydoc autopas::ParticleContainerInterface::updateHaloParticle()
 * @note Not implemented: always throws std::runtime_error.
 */
bool updateHaloParticle(const Particle &haloParticle) override { throw std::runtime_error("not yet implemented"); }
/**
 * @copydoc VerletLists::deleteHaloParticles
 */
void deleteHaloParticles() override {
  // quick and dirty: iterate over all particles and delete the non-owned (halo) ones
  // @todo: make this proper
  for (auto iter = this->begin(IteratorBehavior::haloOnly); iter.isValid(); ++iter) {
    if (not iter->isOwned()) {
      internal::deleteParticle(iter);
    }
  }
}
/**
 * @copydoc VerletLists::updateContainer()
 * @return owned particles that left the domain box (they are removed from the container)
 */
AUTOPAS_WARN_UNUSED_RESULT
std::vector<Particle> updateContainer() override {
  AutoPasLog(debug, "updating container");
  // first delete all halo particles
  this->deleteHaloParticles();
  // next find and extract owned particles that moved outside the box
  std::vector<Particle> invalidParticles;
  /// @todo: parallelize
  for (auto iter = this->begin(IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
    if (not utils::inBox(iter->getR(), _boxMin, _boxMax)) {
      invalidParticles.push_back(*iter);
      internal::deleteParticle(iter);
    }
  }
  return invalidParticles;
}
/** Not implemented: raises an exception and returns false. */
bool isContainerUpdateNeeded() const override {
  autopas::utils::ExceptionHandler::exception("VerletClusterLists.isContainerUpdateNeeded not yet implemented");
  return false;
}
/** @return selector info built from the grid dimensions, interaction length,
 *  a zero cell length (the xy-grid is not cubic) and the cluster size. */
TraversalSelectorInfo getTraversalSelectorInfo() const override {
  return TraversalSelectorInfo(_cellsPerDim, this->getInteractionLength(), {0., 0., 0.}, _clusterSize);
}
/** Iterator over all particles.
 *  NOTE: the behavior parameter is ignored here — every particle is visited. */
ParticleIteratorWrapper<Particle, true> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
  return ParticleIteratorWrapper<Particle, true>(
      new internal::ParticleIterator<Particle, FullParticleCell<Particle>, true>(&this->_cells));
}
/** Const iterator over all particles.
 *  NOTE: the behavior parameter is ignored here — every particle is visited. */
ParticleIteratorWrapper<Particle, false> begin(
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
  return ParticleIteratorWrapper<Particle, false>(
      new internal::ParticleIterator<Particle, FullParticleCell<Particle>, false>(&this->_cells));
}
/** Region iterator — not implemented: raises an exception and returns an empty wrapper. */
ParticleIteratorWrapper<Particle, true> getRegionIterator(
    const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
  // @todo implement this if bounding boxes are here
  autopas::utils::ExceptionHandler::exception("VerletClusterLists.getRegionIterator not yet implemented.");
  return ParticleIteratorWrapper<Particle, true>();
}
/** Const region iterator — not implemented: raises an exception and returns an empty wrapper. */
ParticleIteratorWrapper<Particle, false> getRegionIterator(
    const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
  // @todo implement this if bounding boxes are here
  autopas::utils::ExceptionHandler::exception("VerletClusterLists.getRegionIterator not yet implemented.");
  return ParticleIteratorWrapper<Particle, false>();
}
/** Rebuild grids, clusters and neighbor lists using the traversal's newton-3 setting. */
void rebuildNeighborLists(TraversalInterface *traversal) override { rebuild(traversal->getUseNewton3()); }
/**
 * Helper method to iterate over all clusters.
 * @tparam LoopBody The type of the lambda to execute for all clusters.
 * @tparam inParallel If the iteration should be executed in parallel or sequential. See traverseClustersParallel()
 * for thread safety.
 * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, int
 * clusterSize, std::vector<Particle*> clusterNeighborList.
 */
template <bool inParallel, class LoopBody>
void traverseClusters(LoopBody &&loopBody) {
  if (not inParallel) {
    traverseClustersSequential<LoopBody>(std::forward<LoopBody>(loopBody));
  } else {
    traverseClustersParallel<LoopBody>(std::forward<LoopBody>(loopBody));
  }
}
/**
 * Returns the ClusterIndexMap (cluster start pointer -> cluster index) for usage
 * in the traversals of this container.
 * @return the ClusterIndexMap.
 */
const auto &getClusterIndexMap() const { return _clusterIndexMap; }
/**
 * Returns the number of clusters in this container.
 * @return The number of clusters in this container.
 */
auto getNumClusters() const { return _numClusters; }
/**
 * Returns the neighbor lists of this container (per grid, per cluster).
 * @return the neighbor lists of this container.
 */
const auto &getNeighborLists() const { return _neighborLists; }
/**
 * Returns the grid side length of the grids in the container.
 * @return the grid side length of the grids in the container.
 */
auto getGridSideLength() const { return _gridSideLength; }
/**
 * Returns the number of grids per dimension on the container (z is always 1).
 * @return the number of grids per dimension on the container.
 */
auto getCellsPerDimension() const { return _cellsPerDim; }
/**
 * Returns the 2D grid for the XY-plane of this container that defines the cluster towers.
 * @return the grids of this container for usage in traversals.
 */
auto &getGrids() { return this->_cells; }
/**
 * Returns the number of particles in each (full) cluster.
 * @return the number of particles in each cluster.
 */
auto getClusterSize() const { return _clusterSize; }
protected:
/**
 * Helper method to sequentially iterate over all clusters.
 * @tparam LoopBody The type of the lambda to execute for all clusters.
 * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t
 * clusterSize, std::vector<Particle*> clusterNeighborList.
 */
template <class LoopBody>
void traverseClustersSequential(LoopBody &&loopBody) {
  for (index_t x = 0; x < _cellsPerDim[0]; x++) {
    for (index_t y = 0; y < _cellsPerDim[1]; y++) {
      index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim);
      auto &grid = this->_cells[index];
      auto &gridNeighborList = _neighborLists[index];
      // clusters are contiguous runs of _clusterSize particles within the grid
      const index_t numClustersInGrid = grid.numParticles() / _clusterSize;
      for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) {
        Particle *iClusterStart = &grid[clusterInGrid * _clusterSize];
        auto &clusterNeighborList = gridNeighborList[clusterInGrid];
        loopBody(iClusterStart, _clusterSize, clusterNeighborList);
      }
    }
  }
}
/**
 * Helper method to iterate over all clusters in parallel.
 *
 * It is always safe to modify the particles in the cluster that is passed to the given loop body. However, when
 * modifying particles from other clusters, the caller has to make sure that no data races occur. Particles must not
 * be added or removed during the traversal.
 * @tparam LoopBody The type of the lambda to execute for all clusters.
 * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t
 * clusterSize, std::vector<Particle*> clusterNeighborList.
 */
template <class LoopBody>
void traverseClustersParallel(LoopBody &&loopBody) {
  // loop bounds hoisted into locals so the OpenMP collapse(2) loops stay canonical
  const index_t endX = _cellsPerDim[0];
  const index_t endY = _cellsPerDim[1];
#if defined(AUTOPAS_OPENMP)
  // @todo: find sensible chunksize
#pragma omp parallel for schedule(dynamic) collapse(2)
#endif
  for (index_t x = 0; x < endX; x++) {
    for (index_t y = 0; y < endY; y++) {
      index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim);
      auto &grid = this->_cells[index];
      auto &gridNeighborList = _neighborLists[index];
      const index_t numClustersInGrid = grid.numParticles() / _clusterSize;
      for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) {
        Particle *iClusterStart = &grid[clusterInGrid * _clusterSize];
        auto &clusterNeighborList = gridNeighborList[clusterInGrid];
        loopBody(iClusterStart, _clusterSize, clusterNeighborList);
      }
    }
  }
}
/**
 * Recalculate grids and clusters, build verlet lists and pad clusters.
 *
 * Steps: collect all particles; estimate a grid side length from the particle
 * density; resize grids and neighbor lists; re-sort particles; z-sort each grid;
 * assign cluster indices; build neighbor lists; pad the last cluster per grid.
 * @param useNewton3 If everything should be built using newton 3 or not.
 */
void rebuild(bool useNewton3) {
  std::vector<Particle> invalidParticles = collectParticlesAndClearClusters();
  auto boxSize = utils::ArrayMath::sub(_boxMax, _boxMin);
  _gridSideLength = estimateOptimalGridSideLength(invalidParticles.size(), boxSize);
  _gridSideLengthReciprocal = 1 / _gridSideLength;
  _cellsPerDim = calculateCellsPerDim(boxSize);
  // _cellsPerDim[2] is always 1
  index_t numCells = _cellsPerDim[0] * _cellsPerDim[1];
  // resize to number of grids
  this->_cells.resize(numCells);
  _neighborLists.resize(numCells);
  sortParticlesIntoClusters(invalidParticles);
  // sort by last dimension and reserve space for dummy particles
  for (auto &cluster : this->_cells) {
    cluster.sortByDim(2);
    size_t size = cluster.numParticles();
    size_t rest = size % _clusterSize;
    if (rest > 0) cluster.reserve(size + (_clusterSize - rest));
  }
  clearNeighborLists();
  _numClusters = buildClusterIndexMap();
  updateVerletLists(useNewton3);
  // fill last cluster with dummy particles, such that each cluster is a multiple of _clusterSize
  padClusters();
}
/**
* Takes all particles from all clusters and returns them. Clusters are cleared.
* @return All particles in the container.
*/
std::vector<Particle> collectParticlesAndClearClusters() {
std::vector<Particle> invalidParticles;
for (auto &cluster : this->_cells) {
for (auto it = cluster.begin(); it.isValid(); ++it) {
invalidParticles.push_back(*it);
}
cluster.clear();
}
return invalidParticles;
}
/**
 * Estimates the optimal grid side length from the particle density.
 * @param numParticles The number of particles in the container.
 * @param boxSize The size of the domain.
 * @return an estimated optimal grid side length.
 */
virtual double estimateOptimalGridSideLength(size_t numParticles, std::array<double, 3> boxSize) const {
  if (numParticles == 0) {
    // no density information: one grid spanning the whole xy-plane
    return std::max(boxSize[0], boxSize[1]);
  }
  const double volume = boxSize[0] * boxSize[1] * boxSize[2];
  const double density = numParticles / volume;
  // side length such that a grid column holds about _clusterSize particles per layer
  return std::cbrt(_clusterSize / density);
}
/**
 * Calculates the cells per dimension in the container using the _gridSideLengthReciprocal.
 * @param boxSize the size of the domain.
 * @return the cells per dimension in the container (z is always 1).
 */
std::array<index_t, 3> calculateCellsPerDim(std::array<double, 3> boxSize) const {
  std::array<index_t, 3> cellsPerDim{};
  for (int d = 0; d < 2; d++) {
    cellsPerDim[d] = static_cast<index_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal));
    // at least one cell; cast instead of a bare 1ul literal so std::max's template
    // deduction stays valid even if index_t is ever changed from unsigned long
    cellsPerDim[d] = std::max(cellsPerDim[d], static_cast<index_t>(1));
  }
  // the grid is 2D: exactly one cell in z
  cellsPerDim[2] = static_cast<index_t>(1);
  return cellsPerDim;
}
/**
 * Sorts all passed particles into their grids (by xy-position).
 * Particles outside the domain box are silently dropped.
 * @param particles The particles to sort into the grids.
 */
void sortParticlesIntoClusters(std::vector<Particle> &particles) {
  for (const auto &p : particles) {
    if (not utils::inBox(p.getR(), _boxMin, _boxMax)) {
      continue;
    }
    this->_cells[get1DIndexOfPosition(p.getR())].addParticle(p);
  }
}
/**
 * Clears the neighbor lists of every grid.
 */
void clearNeighborLists() {
  for (auto &gridLists : _neighborLists) gridLists.clear();
}
/**
 * Update the verlet lists.
 *
 * @param useNewton3 If newton 3 should be used to build the neighbor lists or not. If true, only saves neighbor
 * clusters that have a higher index than the current cluster. (@see buildClusterIndexMap())
 */
void updateVerletLists(bool useNewton3) {
  _neighborListIsNewton3 = useNewton3;
  // number of grids in each direction that can contain neighbors within interaction length
  const int boxRange = static_cast<int>(std::ceil((_cutoff + _skin) * _gridSideLengthReciprocal));
  const int gridMaxX = _cellsPerDim[0] - 1;
  const int gridMaxY = _cellsPerDim[1] - 1;
  // for all grids
  for (int yi = 0; yi <= gridMaxY; yi++) {
    for (int xi = 0; xi <= gridMaxX; xi++) {
      auto &iGrid = this->_cells[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
      // calculate number of full clusters and rest
      index_t iSize = iGrid.numParticles() / _clusterSize;
      int iRest = iGrid.numParticles() % _clusterSize;
      // clamp the neighbor-search window to the grid bounds
      const int minX = std::max(xi - boxRange, 0);
      const int minY = std::max(yi - boxRange, 0);
      const int maxX = std::min(xi + boxRange, gridMaxX);
      const int maxY = std::min(yi + boxRange, gridMaxY);
      auto &iNeighbors = _neighborLists[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
      // one neighbor list per cluster, including a possible partial last cluster
      if (iRest > 0)
        iNeighbors.resize(iSize + 1);
      else
        iNeighbors.resize(iSize);
      addClustersOfNeighborGridsAsNeighborsIfInRange(iGrid, iSize, iRest, iNeighbors, minX, maxX, minY, maxY, xi, yi);
    }
  }
}
/**
 * Iterates over neighbor grids of the i-th grid and adds all clusters in them that are within the cutoff radius to
 * the neighbor list of the clusters in the i-th grid.
 * @param iGrid The i-th grid.
 * @param iSize The number of full clusters in the i-th grid.
 * @param iRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
 * @param iNeighbors The neighbor list of the i-th grid.
 * @param minX lower x-bound of the neighbor-grid window
 * @param maxX upper x-bound of the neighbor-grid window
 * @param minY lower y-bound of the neighbor-grid window
 * @param maxY upper y-bound of the neighbor-grid window
 * @param xi The x-index of the i-th grid.
 * @param yi the y-index of the i-th grid.
 */
void addClustersOfNeighborGridsAsNeighborsIfInRange(FullParticleCell<Particle> &iGrid, index_t iSize, int iRest,
                                                    std::vector<std::vector<Particle *>> &iNeighbors, const int minX,
                                                    const int maxX, const int minY, const int maxY, const int xi,
                                                    const int yi) {
  // for all neighbor grids
  for (int yj = minY; yj <= maxY; yj++) {
    // minimal xy-gap between grid columns: grids |d| apart have (|d|-1) full grids between them
    double distY = std::max(0, std::abs(yi - yj) - 1) * _gridSideLength;
    for (int xj = minX; xj <= maxX; xj++) {
      double distX = std::max(0, std::abs(xi - xj) - 1) * _gridSideLength;
      // calculate distance in xy-plane and skip if already longer than cutoff
      double distXYsqr = distX * distX + distY * distY;
      if (distXYsqr <= _interactionLengthSqr) {
        auto &jGrid = this->_cells[VerletClusterMaths::index1D(xj, yj, _cellsPerDim)];
        // calculate number of full clusters and rest
        const index_t jSize = jGrid.numParticles() / _clusterSize;
        const int jRest = jGrid.numParticles() % _clusterSize;
        // for all clusters in the i-th grid
        for (index_t zi = 0; zi < iSize; zi++) {
          addAllJClustersAsNeighborIfInRange(iGrid, zi, _clusterSize, iNeighbors, jGrid, jSize, jRest, distXYsqr);
        }
        // special case: last cluster of iGrid not full
        if (iRest > 0) {
          addAllJClustersAsNeighborIfInRange(iGrid, iSize, iRest, iNeighbors, jGrid, jSize, jRest, distXYsqr);
        }
      }
    }
  }
}
/**
 * Adds all clusters in jGrid that are within the cutoff radius to the neighbor list of the given cluster in iGrid
 * (iClusterIndex).
 * @param iGrid The i-th grid.
 * @param iClusterIndex The index of the cluster to work on in the i-th grid.
 * @param iClusterSize The size of the cluster with index iClusterIndex in the i-th grid.
 * @param iNeighbors The neighbor list of the i-th grid.
 * @param jGrid The j-th grid.
 * @param jSize The number of full clusters in the j-th grid.
 * @param jRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
 * @param distXYsqr The distance between the i-th grid and the j-th grid in the xy-plane.
 */
void addAllJClustersAsNeighborIfInRange(FullParticleCell<Particle> &iGrid, index_t iClusterIndex, int iClusterSize,
                                        std::vector<std::vector<Particle *>> &iNeighbors,
                                        FullParticleCell<Particle> &jGrid, index_t jSize, int jRest,
                                        double distXYsqr) {
  // z-extent of the i-cluster: particles are z-sorted, so first/last give bottom/top
  double iBBoxBot = iGrid[iClusterIndex * _clusterSize].getR()[2];
  double iBBoxTop = iGrid[iClusterIndex * _clusterSize + iClusterSize - 1].getR()[2];
  auto &iClusterNeighborList = iNeighbors[iClusterIndex];
  Particle *iClusterStart = &iGrid[iClusterIndex * _clusterSize];
  // iterate over full clusters of j-th grid.
  for (index_t jClusterIndex = 0; jClusterIndex < jSize; jClusterIndex++) {
    Particle *jClusterStart = &jGrid[jClusterIndex * _clusterSize];
    // If newton 3 is used, only add clusters as neighbors that have a equal or higher index. Skip otherwise.
    if (_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart)) continue;
    addJClusterAsNeighborIfInRange(jGrid, jClusterStart, _clusterSize, iClusterNeighborList, distXYsqr, iBBoxBot,
                                   iBBoxTop);
  }
  // special case: last cluster not full
  if (jRest > 0) {
    Particle *jClusterStart = &jGrid[jSize * _clusterSize];
    // If newton 3 is used, only add clusters as neighbors that have a equal or higher index. Skip otherwise.
    if (not(_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart))) {
      addJClusterAsNeighborIfInRange(jGrid, jClusterStart, jRest, iClusterNeighborList, distXYsqr, iBBoxBot,
                                     iBBoxTop);
    }
  }
}
/**
 * Adds the given cluster in jGrid to the given neighbor list (iClusterNeighborList), if it is within the cutoff
 * radius.
 * @param jGrid The j-th grid.
 * @param jClusterStart A pointer to the start of the cluster to work on in the j-th grid.
 * @param jClusterSize The size of the cluster to work on in the j-th grid.
 * @param iClusterNeighborList The neighbor list of the cluster in the i-th grid to fill the neighbors for.
 * @param distXYsqr The squared distance between the i-th grid and the j-th grid in the xy-plane.
 * @param iBBoxBot The bottom z-coordinate of the cluster in the i-th grid.
 * @param iBBoxTop The top z-coordinate of the cluster in the i-th grid.
 */
void addJClusterAsNeighborIfInRange(FullParticleCell<Particle> &jGrid, Particle *jClusterStart, int jClusterSize,
                                    std::vector<Particle *> &iClusterNeighborList, double distXYsqr, double iBBoxBot,
                                    double iBBoxTop) {
  // z-extent of the j-cluster (particles within a grid are z-sorted)
  double jBBoxBot = jClusterStart->getR()[2];
  double jBBoxTop = (jClusterStart + (jClusterSize - 1))->getR()[2];
  double distZ = bboxDistance(iBBoxBot, iBBoxTop, jBBoxBot, jBBoxTop);
  // combine the xy grid distance with the z bbox gap and test against (cutoff+skin)^2
  if (distXYsqr + distZ * distZ <= _interactionLengthSqr) {
    iClusterNeighborList.push_back(jClusterStart);
  }
}
/**
 * Pad clusters with dummy particles
 * until each cluster is a multiple of _clusterSize.
 * Useful for SIMD vectorization.
 * Dummy particles are placed far outside the box (z > 2 * boxMax[2]) so they
 * never interact with real particles.
 */
void padClusters() {
  for (index_t x = 0; x < _cellsPerDim[0]; x++) {
    for (index_t y = 0; y < _cellsPerDim[1]; y++) {
      auto &grid = this->_cells[VerletClusterMaths::index1D(x, y, _cellsPerDim)];
      index_t rest = grid.numParticles() % _clusterSize;
      if (rest > 0) {
        // NOTE(review): `int i = rest` narrows index_t -> int; safe while rest < _clusterSize (an int)
        for (int i = rest; i < _clusterSize; i++) {
          Particle p = Particle();
          // spread dummies apart so they do not interact with each other either
          p.setR({2 * x * _cutoff, 2 * y * _cutoff, 2 * _boxMax[2] + 2 * i * _cutoff});
          grid.addParticle(p);
        }
      }
    }
  }
}
/**
 * Calculates the distance of two bounding boxes in one dimension.
 * Overlapping (or touching) intervals have distance 0.
 * @param min1 minimum coordinate of first bbox in tested dimension
 * @param max1 maximum coordinate of first bbox in tested dimension
 * @param min2 minimum coordinate of second bbox in tested dimension
 * @param max2 maximum coordinate of second bbox in tested dimension
 * @return distance
 */
inline double bboxDistance(const double min1, const double max1, const double min2, const double max2) const {
  // The gap is positive in at most one direction; clamp the larger signed gap at zero.
  return std::max(0.0, std::max(min2 - max1, min1 - max2));
}
/**
 * Gets the 1d grid index containing a particle in given position.
 * Positions outside the box are clamped to the border grids.
 * @param pos the position of the particle
 * @return the index of the grid
 */
inline index_t get1DIndexOfPosition(const std::array<double, 3> &pos) const {
  std::array<index_t, 2> cellIndex{};
  for (int dim = 0; dim < 2; dim++) {
    // +1 offset: NOTE(review) appears to reserve index 0 for below-box positions — confirm
    const long int value = (static_cast<long int>(floor((pos[dim] - _boxMin[dim]) * _gridSideLengthReciprocal))) + 1l;
    const index_t nonnegativeValue = static_cast<index_t>(std::max(value, 0l));
    const index_t nonLargerValue = std::min(nonnegativeValue, _cellsPerDim[dim] - 1);
    cellIndex[dim] = nonLargerValue;
    /// @todo this is a sanity check to prevent doubling of particles, but
    /// could be done better! e.g. by border and flag manager
    if (pos[dim] >= _boxMax[dim]) {
      cellIndex[dim] = _cellsPerDim[dim] - 1;
    } else if (pos[dim] < _boxMin[dim]) {
      cellIndex[dim] = 0;
    }
  }
  return VerletClusterMaths::index1D(cellIndex[0], cellIndex[1], _cellsPerDim);
}
/**
 * Builds the _clusterIndexMap to be up to date with _cells.
 *
 * Every cluster gets an index assigned. The indices are given in a way so that the VerletClustersColoringTraversal
 * works as easy as possible with newton 3. The newton 3 neighbor list just has to only save neighbors with a higher
 * index, and there will be no data races.
 *
 * For each cluster now holds (with x-axis as left <=> right, y-axis <=> as top <=> bottom):
 * - The indices of all clusters of the three color cells above and the color cell to the left are lower.
 * - The indices of all clusters of the three color cells below and the color cell to the right are higher.
 * - For all grids of the same color cell holds:
 * - The indices of all clusters of the three grids above and the grids to the left are lower.
 * - The indices of all clusters of the three grids below and the grids to the right are higher.
 * - For all clusters in the same grid holds:
 * - The indices of all clusters with a lower z-coordinate than the current cluster are lower.
 * - The indices of all clusters with a higher z-coordinate than the current cluster are higher.
 *
 * @return The number of clusters in the container.
 */
index_t buildClusterIndexMap() {
  index_t nextFreeMapIndex = 0;
  // a color cell spans enough grids to cover the interaction length in x and y
  int gridsPerColoringCell = static_cast<int>(std::ceil((_cutoff + _skin) / _gridSideLength));
  std::array<unsigned long, 3> coloringCellsPerDim{};
  for (int i = 0; i < 3; i++) {
    coloringCellsPerDim[i] =
        static_cast<unsigned long>(std::ceil(_cellsPerDim[i] / static_cast<double>(gridsPerColoringCell)));
  }
  // index color cells row-major (y outer, x inner) to get the ordering documented above
  for (unsigned long yColorCell = 0; yColorCell < coloringCellsPerDim[1]; yColorCell++) {
    for (unsigned long xColorCell = 0; xColorCell < coloringCellsPerDim[0]; xColorCell++) {
      nextFreeMapIndex = indexColorCell(xColorCell, yColorCell, gridsPerColoringCell, nextFreeMapIndex);
    }
  }
  return nextFreeMapIndex;
}
private:
/**
 * Indexes all clusters of one color cell (inserts value into _clusterIndexMap) starting with currentMapIndex.
 *
 * The scheme follows the documentation from buildClusterIndexMap().
 * @param xColorCell The x coordinate of the color cell.
 * @param yColorCell The y coordinate of the color cell.
 * @param gridsPerColoringCell The number of grids in x and y dimension of this color cell.
 * @param currentMapIndex The first index to use.
 * @return The next available index after this cell.
 */
index_t indexColorCell(unsigned long xColorCell, unsigned long yColorCell, int gridsPerColoringCell,
                       index_t currentMapIndex) {
  for (int yInner = 0; yInner < gridsPerColoringCell; yInner++) {
    for (int xInner = 0; xInner < gridsPerColoringCell; xInner++) {
      unsigned long y = yColorCell * gridsPerColoringCell + yInner;
      unsigned long x = xColorCell * gridsPerColoringCell + xInner;
      // Not every coloring cell has to have gridsPerColoringCell grids in every direction.
      if (x >= _cellsPerDim[0] or y >= _cellsPerDim[1]) {
        continue;
      }
      unsigned long gridIndex1D = VerletClusterMaths::index1D(x, y, _cellsPerDim);
      // BUGFIX: "&current…" had been mangled into the mojibake "¤t…" (an
      // "&curren" HTML-entity corruption); restore the reference declaration
      // and the address-of on the cluster start below.
      auto &currentGrid = this->_cells[gridIndex1D];
      auto numClusters = currentGrid.numParticles() / _clusterSize;
      int rest = currentGrid.numParticles() % _clusterSize;
      if (rest > 0) numClusters++;
      for (unsigned long currentCluster = 0; currentCluster < numClusters; currentCluster++) {
        Particle *clusterStart = &currentGrid[currentCluster * _clusterSize];
        _clusterIndexMap[clusterStart] = currentMapIndex++;
      }
    }
  }
  return currentMapIndex;
}
private:
/**
* Neighbors of clusters for each grid. Whether newton 3 is used is saved in _neighborListIsNewton3.
* If it uses newton 3: Only the neighbor clusters that have a higher index are saved. (@see _clusterIndexMap)
*/
std::vector<std::vector<std::vector<Particle *>>> _neighborLists;
/**
* The number of particles in a full cluster.
*/
int _clusterSize;
/**
* The number of clusters. This is not equal to _cells.size(), as every grid (=cell) might contain multiple clusters.
*/
index_t _numClusters;
/**
* Box min of the domain.
*/
std::array<double, 3> _boxMin;
/**
* Box max of the domain.
*/
std::array<double, 3> _boxMax;
/**
* Side length of xy-grid.
*/
double _gridSideLength{0.};
/**
* Reciprocal of _gridSideLength (cached to avoid repeated division).
*/
double _gridSideLengthReciprocal{0.};
/**
* Dimensions of the 2D xy-grid.
*/
std::array<index_t, 3> _cellsPerDim{};
/**
* The skin radius.
*/
double _skin;
/**
* The cutoff.
*/
double _cutoff;
/**
* Specifies if the neighbor list uses newton 3 or not.
*/
bool _neighborListIsNewton3;
/**
* Maps indices to the starting pointers for each cluster. For the idea behind the assignment, @see
* buildClusterIndexMap().
*/
std::unordered_map<Particle *, index_t> _clusterIndexMap;
/**
* (_cutoff + _skin)^2.
*/
double _interactionLengthSqr;
};
} // namespace autopas
|
DRB064-outeronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
#include <stdio.h> /* printf() was previously implicitly declared */

double b[100][100];
#define N 100

/*
 * Fill b with b[i][j] = i * j.
 * Every (i, j) pair writes a distinct element, so both loop nests are
 * race-free under the OpenMP parallelization. Returns 0.
 */
int init()
{
  int i, j; /* unused 'k' removed */
#pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++) {
#pragma omp parallel for private(j)
    for (j = 0; j < N; j++) {
      b[i][j] = i * j;
    }
  }
  return 0;
}

/*
 * Propagate b[i][0] across columns 1..m-1 of the first n rows.
 * Only the outer loop is parallelized: the inner loop carries a true
 * dependence (b[i][j] reads b[i][j-1]) and must stay sequential, so
 * there is no race condition.
 */
void foo(int n, int m)
{
  int i, j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < n; i++)
    for (j = 1; j < m; j++) // Be careful about bounds of j
      b[i][j] = b[i][j - 1];
}

/*
 * Print the whole matrix, one value per line. Returns 0.
 */
int print()
{
  int i, j; /* unused 'k' removed */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      printf("%lf\n", b[i][j]);
    }
  }
  return 0;
}
/* Driver: build the matrix, run the dependence kernel, dump the result. */
int main()
{
init();
foo(100, 100);
print();
return 0;
}
|
DRB002-antidep1-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@67:10 vs. a[i]@67:5
*/
#include <stdlib.h>
#include <stdio.h> /* printf() was previously implicitly declared */

/*
 * DataRaceBench DRB002 variant: a loop with loop-carried anti-dependence
 * (loop main#1 reads a[i+1] while writing a[i]).
 * An optional command-line argument overrides the array length.
 */
int main(int argc, char * argv[])
{
int i;
int len = 1000;
int _ret_val_0;
if (argc>1)
{
len=atoi(argv[1]);
}
/* Reject non-positive lengths from atoi(): a VLA of non-positive size
   is undefined behavior. Fall back to the default. */
if (len<=0)
{
len=1000;
}
/* Bug fix: the VLA is now declared AFTER len receives its final value.
   Previously it was sized with the default 1000 before the command-line
   override, so any requested length > 1000 overflowed the array. */
int a[len];
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=i;
}
#pragma cetus private(i)
#pragma loop name main#1
for (i=0; i<(len-1); i ++ )
{
a[i]=(a[i+1]+1);
}
#pragma cetus private(i)
#pragma loop name main#2
for (i=0; i<len; i ++ )
{
printf("%d\n", a[i]);
}
_ret_val_0=0;
return _ret_val_0;
}
|
gather.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_GATHER_H_
#define MACE_KERNELS_GATHER_H_
#include <algorithm>
#include <cmath>
#include <functional>
#include <numeric>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/public/mace.h"
namespace mace {
namespace kernels {
// Shared state for all Gather kernels: the gather axis and the
// scale factor applied to the output.
struct GatherBase {
  explicit GatherBase(int axis, float y) {
    axis_ = axis;
    y_ = y;
  }
  int axis_;
  float y_;
};
// Primary template; only specializations are defined.
template <DeviceType D, typename T>
struct GatherFunctor;
/**
 * CPU/float Gather: picks slices of `params` along `axis_` as selected by
 * `indices`, then scales the result by `y_` when y_ != 1.
 */
template <>
struct GatherFunctor<DeviceType::CPU, float> : GatherBase {
explicit GatherFunctor(int axis, float y) : GatherBase(axis, y) {}
MaceStatus operator()(const Tensor *params,
const Tensor *indices,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
std::vector<index_t> output_shape;
// Negative axis counts from the end, as in NumPy/TensorFlow.
if (axis_ < 0) {
axis_ += params->dim_size();
}
MACE_CHECK(axis_ >= 0 && axis_ < params->dim_size(),
"axis is out of bound: ", axis_);
// Output shape = params dims before axis + indices dims + params dims after axis.
output_shape.insert(output_shape.end(), params->shape().begin(),
params->shape().begin() + axis_);
output_shape.insert(output_shape.end(), indices->shape().begin(),
indices->shape().end());
output_shape.insert(output_shape.end(),
params->shape().begin() + (axis_ + 1),
params->shape().end());
MACE_RETURN_IF_ERROR(output->Resize(output_shape));
Tensor::MappingGuard indices_guard(indices);
Tensor::MappingGuard params_guard(params);
Tensor::MappingGuard output_guard(output);
const int32_t *indices_data = indices->data<int32_t>();
const float *params_data = params->data<float>();
float *output_data = output->mutable_data<float>();
index_t axis_dim_size = params->dim(axis_);
// lhs_size = product of dims before axis, rhs_size = product of dims after.
index_t lhs_size = std::accumulate(params->shape().begin(),
params->shape().begin() + axis_, 1,
std::multiplies<index_t>());
index_t rhs_size =
std::accumulate(params->shape().begin() + (axis_ + 1),
params->shape().end(), 1, std::multiplies<index_t>());
index_t index_size = indices->size();
// Each (l, idx) pair copies one contiguous rhs_size slice; iterations
// write disjoint output ranges, so collapse(2) is race-free.
#pragma omp parallel for collapse(2)
for (index_t l = 0; l < lhs_size; ++l) {
for (index_t idx = 0; idx < index_size; ++idx) {
MACE_ASSERT(indices_data[idx] < axis_dim_size, "idx out of bound: ",
indices_data[idx]);
memcpy(
output_data + ((l * index_size) + idx) * rhs_size,
params_data + ((l * axis_dim_size) + indices_data[idx]) * rhs_size,
sizeof(float) * rhs_size);
}
}
// Skip the scaling pass when y_ is (approximately) 1.
if (std::fabs(y_ - 1.0) > 1e-6) {
#pragma omp parallel for
for (index_t i = 0; i < output->size(); ++i) {
output_data[i] *= y_;
}
}
return MACE_SUCCESS;
}
};
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_GATHER_H_
|
ten_tusscher_2004_epi_S3_12.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_12.h"
/*
 * Report static model metadata: the resting potential (INITIAL_V) and the
 * number of ODE state variables (NEQ), each only when requested by the
 * corresponding flag.
 */
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
/*
 * Load the NEQ state variables of one cell into sv.
 * The default (paper) initial conditions are kept below for reference;
 * the values actually used are Elnaz's precomputed steady-state vector.
 */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6659643096818,0.00126342859487541,0.782100262764972,0.781978091983927,0.000172373983216983,0.486107163112887,0.00291990687735242,0.999998380250089,1.90224754407316e-08,1.86658961737956e-05,0.999770013320589,1.00741339003808,0.999998449667419,3.76483988014377e-05,0.470997021889879,10.7143996824072,138.907396963001};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
/*
 * Advance every requested cell by num_steps explicit steps of size dt.
 * cells_to_solve (when non-NULL) maps loop index -> cell id; otherwise the
 * loop index is the cell id. Cells are independent, so the outer loop is
 * parallelized; sv_id is per-iteration scratch and therefore private.
 */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
// Each cell owns a contiguous slice of NEQ state variables in sv.
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
/*
 * Take one time step of size dt for a single cell: snapshot the current
 * state, evaluate the model update via RHS_cpu, and write the updated
 * state back into sv.
 */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ];
    real updated[NEQ];
    int k = 0;
    while (k < NEQ) {
        state[k] = sv[k];
        k++;
    }
    RHS_cpu(state, updated, stim_current, dt);
    for (k = NEQ - 1; k >= 0; k--) {
        sv[k] = updated[k];
    }
}
/*
 * Ten Tusscher 2004 epicardial cell model, one explicit update step.
 * Reads the 17 state variables from sv, computes all membrane currents and
 * concentration updates, and writes the UPDATED state values (not time
 * derivatives, despite the name) into rDY_: forward Euler for the voltage
 * and concentrations, Rush-Larsen exponential updates for the gates.
 * Several conductances are overridden from the hard-coded `parameters`
 * array below (variant S3_12 of the model).
 */
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Variant-specific parameter set: overrides the default conductances above.
real parameters []={14.5122469946898,0.000289049813785555,0.000138099248415042,6.23108182726077e-05,0.235920184817430,0.137885432304245,0.171858104323313,4.54207690553048,0.0146701313967813,1.02382441517950,1099.87497959849,0.000601753938706630,0.327493680344098,0.0188930125219796,0.00506656306041461,4.49126756029006e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates only move toward their targets when the membrane is
// depolarized below -37 mV or when the gate is decreasing.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
zero_omp.c | /*
* File: zero_omp.c
* Author: Philip Mucci
* mucci@cs.utk.edu
* Mods: Nils Smeds
* smeds@pdc.kth.se
* Anders Nilsson
* anni@pdc.kth.se
*/
/* This file performs the following test: start, stop and timer
functionality for 2 slave OMP threads
- It attempts to use the following two counters. It may use less
depending on hardware counter resource limitations. These are counted
in the default counting domain and default granularity, depending on
the platform. Usually this is the user domain (PAPI_DOM_USER) and
thread context (PAPI_GRN_THR).
+ PAPI_FP_INS
+ PAPI_TOT_CYC
Each thread inside the Thread routine:
- Get cyc.
- Get us.
- Start counters
- Do flops
- Stop and read counters
- Get us.
- Get cyc.
Master serial thread:
- Get us.
- Get cyc.
- Run parallel for loop
- Get us.
- Get cyc.
*/
#include "papi_test.h"
#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
const PAPI_hw_info_t *hw_info = NULL;
/*
 * Per-thread body of the OpenMP PAPI test: create an event set with
 * PAPI_TOT_CYC plus one FP/instruction event, count around do_flops(n),
 * report the counts and wall-clock timings, then tear everything down and
 * unregister the thread from PAPI.
 */
void
Thread( int n )
{
int retval, num_tests = 1;
int EventSet1 = PAPI_NULL;
int PAPI_event, mask1;
int num_events1;
long long **values;
long long elapsed_us, elapsed_cyc;
char event_name[PAPI_MAX_STR_LEN];
printf( "Thread %#x started\n", omp_get_thread_num( ) );
num_events1 = 2;
/* add PAPI_TOT_CYC and one of the events in
PAPI_FP_INS, PAPI_FP_OPS or PAPI_TOT_INS,
depending on the availability of the event
on the platform */
EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 );
retval = PAPI_event_code_to_name( PAPI_event, event_name );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );
values = allocate_test_space( num_tests, num_events1 );
/* Bracket the counted work with timestamps taken outside start/stop. */
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
retval = PAPI_start( EventSet1 );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_start", retval );
do_flops( n );
retval = PAPI_stop( EventSet1, values[0] );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_stop", retval );
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
remove_test_events( &EventSet1, mask1 );
if ( !TESTS_QUIET ) {
printf( "Thread %#x %-12s : \t%lld\n", omp_get_thread_num( ),
event_name, values[0][1] );
printf( "Thread %#x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num( ),
values[0][0] );
printf( "Thread %#x Real usec : \t%lld\n", omp_get_thread_num( ),
elapsed_us );
printf( "Thread %#x Real cycles : \t%lld\n", omp_get_thread_num( ),
elapsed_cyc );
}
/* It is illegal for the threads to exit in OpenMP */
/* test_pass(__FILE__,0,0); */
free_test_space( values, num_tests );
PAPI_unregister_thread( );
printf( "Thread %#x finished\n", omp_get_thread_num( ) );
}
/*
 * Test driver: initialize the PAPI library and its OpenMP thread support,
 * then run Thread() from a parallel region, from a single serial thread,
 * and from a second parallel region, timing the whole sequence.
 */
int
main( int argc, char **argv )
{
int maxthr, retval;
long long elapsed_us, elapsed_cyc;
tests_quiet( argc, argv ); /* Set TESTS_QUIET variable */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if ( retval != PAPI_VER_CURRENT )
test_fail( __FILE__, __LINE__, "PAPI_library_init", retval );
hw_info = PAPI_get_hardware_info( );
if ( hw_info == NULL )
test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 );
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
/* PAPI needs a per-thread id function; use the OpenMP thread number. */
retval =
PAPI_thread_init( ( unsigned
long ( * )( void ) ) ( omp_get_thread_num ) );
if ( retval != PAPI_OK ) {
if ( retval == PAPI_ECMP )
test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval );
else
test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval );
}
/* NOTE(review): maxthr is assigned but never read in either region. */
#pragma omp parallel private(maxthr)
{
maxthr = omp_get_num_threads( );
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
/* Run once single-threaded, then restore the full thread count. */
omp_set_num_threads( 1 );
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
omp_set_num_threads( omp_get_max_threads( ) );
#pragma omp parallel private(maxthr)
{
maxthr = omp_get_num_threads( );
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
if ( !TESTS_QUIET ) {
printf( "Master real usec : \t%lld\n", elapsed_us );
printf( "Master real cycles : \t%lld\n", elapsed_cyc );
}
test_pass( __FILE__, NULL, 0 );
exit( 0 );
}
|
test_openmp.c | #include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <rt/openmp/openmp.h>
#define N 4
volatile float a[N];
volatile float b[N];
volatile float c[N];
/*
 * Basic OpenMP smoke test: each thread announces itself, then an
 * element-wise product c = a * b is computed with a parallel for and
 * the results are printed. Always returns 0.
 */
static int omp_simple()
{
    int idx;

    /* Seed the operand arrays: a[idx] = b[idx] = idx. */
    for (idx = 0; idx < N; idx++) {
        a[idx] = idx;
        b[idx] = idx;
    }

#pragma omp parallel
    nk_vc_printf("I am thread %d (%d total)\n",omp_get_thread_num(),omp_get_num_threads());

#pragma omp parallel for
    for (idx = 0; idx < N; idx++) {
        c[idx] = a[idx] * b[idx];
    }

    /* Dump all three arrays for manual inspection. */
    for (idx = 0; idx < N; idx++) {
        nk_vc_printf("a[%d]=%d b[%d]=%d c[%d]=%d\n",idx,(int)a[idx],idx,(int)b[idx],idx,(int)c[idx]);
    }

    return 0;
}
/*
 * Print the current team size, tagged with the given nesting level.
 * Exactly one thread of the enclosing team performs the print.
 */
static void report_num_threads(int level)
{
#pragma omp single
    nk_vc_printf("Level %d: number of threads in the team - %d\n",
                 level, omp_get_num_threads());
}
/*
 * Exercise three levels of nested parallel regions, two threads each,
 * reporting the team size at every level. Dynamic adjustment is disabled
 * so the requested thread counts are honored. Always returns 0.
 */
static int
omp_nested (void)
{
    omp_set_dynamic(0);

#pragma omp parallel num_threads(2)
    {
        report_num_threads(1);

#pragma omp parallel num_threads(2)
        {
            report_num_threads(2);

#pragma omp parallel num_threads(2)
            report_num_threads(3);
        }
    }

    return 0;
}
/*
 * Run the full OpenMP test suite: bring up the OpenMP runtime threads,
 * run the simple and nested tests, and tear the runtime back down.
 * Always returns 0.
 */
int
test_omp (void)
{
    nk_openmp_thread_init();

    nk_vc_printf("Starting simple test\n");
    omp_simple();

    nk_vc_printf("Starting nested test\n");
    omp_nested();

    nk_vc_printf("OMP test finished\n");
    nk_openmp_thread_deinit();

    return 0;
}
/*
 * Shell handler for the "omp" command; the command line and private data
 * are unused.
 */
static int
handle_omp (char * buf, void * priv)
{
    (void)buf;
    (void)priv;
    return test_omp();
}
/* Register "omp" as a shell command that runs the OpenMP smoke tests. */
static struct shell_cmd_impl omp_impl = {
.cmd = "omp",
.help_str = "omp",
.handler = handle_omp,
};
nk_register_shell_cmd(omp_impl);
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
//
// TODO: Add the following runtime calls.
// omp_set_num_threads
//
// All Lock Routines.
//
int main(void) {
// CHECK: Able to use offloading!
check_offloading();
int fail;
double A[N], B[N], C[N], D[N], E[N];
INIT();
//
// Test: omp_get_num_threads()
//
ZERO(A);
TEST({
_Pragma("omp parallel if (0)")
A[0] = omp_get_num_threads(); // 1
_Pragma("omp parallel num_threads(128)")
{
if (omp_get_thread_num() == 3) {
A[0] += omp_get_num_threads(); // 128
}
}
}, VERIFY(0, 1, A[i], 129));
//
// Test: omp_get_max_threads() (depends on device type)
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_max_threads();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_max_threads(); // 1
A[1] = omp_get_num_threads();
}
}
}, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], A[1] + 1));
//
// Test: omp_get_num_procs()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_num_procs();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 18) {
A[0] += omp_get_num_procs();
A[1] = 2*omp_get_num_threads();
}
}
}, VERIFY(0, 1, A[i], A[1]));
//
// Test: omp_in_parallel()
//
ZERO(A);
TEST({
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i) {
_Pragma("atomic write")
A[0] = omp_in_parallel(); // 0
}
// Serialized parallel
_Pragma("omp parallel num_threads(32) if (A[0] == 1)")
{
_Pragma("atomic update")
A[0] += omp_in_parallel(); // 0
}
// Parallel execution
_Pragma("omp parallel num_threads(32) if (A[0] == 0)")
{
if (omp_get_thread_num() == 0) {
_Pragma("atomic update")
A[0] += omp_in_parallel(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_set/get_dynamic()
//
ZERO(A);
TEST({
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i)
{
A[0] = omp_get_dynamic(); // 0
omp_set_dynamic(1);
A[0] += omp_get_dynamic(); // 1
}
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_dynamic(); // 1
omp_set_dynamic(0); // Only for this parallel region.
}
}
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i)
A[0] += omp_get_dynamic(); // 1
}, VERIFY(0, 1, A[i], 3));
//
// Test: omp_get_cancellation()
// FIXME: Rewrite test case once we have cancellation support.
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_cancellation(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_cancellation(); // 0
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_set/get_nested(). Not used on the device currently.
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
{
A[0] = omp_get_nested(); // 0
omp_set_nested(0);
A[0] += omp_get_nested(); // 0
}
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] += omp_get_nested(); // 0
omp_set_nested(0);
}
}
_Pragma("omp parallel if(0)")
A[0] += omp_get_nested(); // 0
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_set/get_schedule().
//
ZERO(A);
int result = 2 * (omp_sched_static + omp_sched_dynamic + omp_sched_guided) + omp_sched_static;
result += 2 * (1110) + 10;
TEST({
omp_sched_t t; int chunk_size;
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i)
{
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] = t + chunk_size;
t = omp_sched_dynamic; chunk_size = 100;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_guided; chunk_size = 1000;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
}
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
omp_sched_t t; int chunk_size;
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_dynamic; chunk_size = 100;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_guided; chunk_size = 1000;
omp_set_schedule(t, chunk_size);
t = omp_sched_auto; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
}
}
t = omp_sched_auto; chunk_size = 0;
_Pragma("omp parallel if(0)")
omp_get_schedule(&t, &chunk_size); // should read 1, 10;
A[0] += t + chunk_size;
}, VERIFY(0, 1, A[i], result));
//
// Test: omp_get_thread_limit()
//
ZERO(A);
TEST({
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i)
A[0] = omp_get_thread_limit();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_thread_limit();
A[1] = 2*omp_get_num_threads();
}
}
}, VERIFY(0, 1, A[i], A[1]));
//
// Test: omp_set/get_max_active_levels()
//
ZERO(A);
TEST({
// Our runtime ignores this.
_Pragma("omp parallel if(0)")
{
omp_set_max_active_levels(1);
A[0] = omp_get_max_active_levels(); // 1
}
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_max_active_levels(); // 1
}
}
}, VERIFY(0, 1, A[i], 2));
//
// Test: omp_get_level()
//
ZERO(A);
TEST({
_Pragma("omp distribute dist_schedule(static,1)")
for (int i = 0; i < 1; ++i)
A[0] = omp_get_level(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_level(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_get_ancestor_thread_num()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_ancestor_thread_num(0); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_ancestor_thread_num(0) + omp_get_ancestor_thread_num(1); // 0 + 18
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_get_team_size()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_team_size(0) + omp_get_team_size(1); // 1 + 1
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_team_size(0) + omp_get_team_size(1); // 1 + 19
}
}
}, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], 22)); // TODO: fix host execution
//
// Test: omp_get_active_level()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_active_level(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
if (omp_get_num_threads() == 1)
A[0] += 1;
else
A[0] += omp_get_active_level(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_in_final()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_in_final(); // 1 always returns true.
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_in_final(); // 1 always returns true.
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? 0 : 2));
//
// Test: omp_get_proc_bind()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_proc_bind(); // 1 always returns omp_proc_bind_true.
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_proc_bind(); // 1 always returns omp_proc_bind_true.
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? 0 : 2));
#if 0
//
// Test: Place routines (linking only).
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
{
(void) omp_get_num_places();
(void) omp_get_place_num_procs(0);
int *ids;
omp_get_place_proc_ids(0, ids);
(void) omp_get_place_num();
(void) omp_get_partition_num_places();
int *place_nums;
omp_get_partition_place_nums(place_nums);
}
}, VERIFY(0, 1, A[i], 0));
#endif
//
// Test: omp_set/get_default_device()
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
{
omp_set_default_device(0); // Not used on device.
A[0] = omp_get_default_device(); // 0 always returns 0.
}
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_default_device(); // 0 always returns 0.
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_get_num_devices(). Undefined on the target.
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_num_devices();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[1] = omp_get_num_devices();
}
}
}, VERIFY(0, 1, A[i], A[i] - A[1]));
//
// Test: omp_get_num_teams(), omp_get_team_num()
// FIXME: Start teams region when supported.
//
ZERO(A);
TEST({
A[0] = omp_get_num_teams(); // 1
A[0] += omp_get_team_num(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_num_teams(); // 1
A[0] += omp_get_team_num(); // 0
}
}
}, VERIFY(0, 1, A[i], 2));
//
// Test: omp_is_initial_device()
//
ZERO(A);
A[1] = omp_is_initial_device();
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_is_initial_device(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_is_initial_device(); // 0
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? A[1] - A[1] : 2.0));
return 0;
#if 0
//
// Test: omp_get_initial_device(). Unspecified behavior when
// called from device.
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_initial_device();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] -= omp_get_initial_device();
}
}
}, VERIFY(0, 1, A[i], 0));
#endif
#if 0
//
// Test: omp_get_max_task_priority().
// TODO: Not used on the gpu at the moment.
//
ZERO(A);
TEST({
_Pragma("omp parallel if(0)")
A[0] = omp_get_max_task_priority();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] -= omp_get_max_task_priority();
}
}
}, VERIFY(0, 1, A[i], 0));
#endif
//
// Test: Timing Routines (linking only).
//
ZERO(A);
TEST({
double precision;
_Pragma("omp parallel if(0)")
precision = omp_get_wtick();
double start; double end;
_Pragma("omp parallel if(0)")
{
start = omp_get_wtime();
end = omp_get_wtime();
}
}, VERIFY(0, 1, A[i], 0));
return 0;
}
|
nodal_residualbased_elimination_builder_and_solver_for_FSI.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverForFSI
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverForFSI
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
 * @param pNewLinearSystemSolver Linear solver to be used for the FSI system;
 *        simply forwarded to the BuilderAndSolver base class, which owns it.
 */
NodalResidualBasedEliminationBuilderAndSolverForFSI(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
// KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverForFSI") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
 * Overrides the virtual base-class destructor; no explicit cleanup is performed here.
 */
~NodalResidualBasedEliminationBuilderAndSolverForFSI() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Computes the nodal material parameters used by the fluid momentum assembly.
 * @param itNode           Iterator to the node whose properties are read.
 * @param density          [out] nodal DENSITY.
 * @param deviatoricCoeff  [out] effective dynamic viscosity, including the
 *                         Papanastasiou-regularized Bingham contribution when
 *                         YIELD_SHEAR > 0.
 * @param volumetricCoeff  [out] time-step-scaled bulk term, further rescaled by
 *                         the "bulk reduction" factor below.
 * @param timeInterval     current time step (DELTA_TIME).
 * @param nodalVolume      volume associated to the node.
 */
void SetMaterialPropertiesToFluid(
    ModelPart::NodeIterator itNode,
    double &density,
    double &deviatoricCoeff,
    double &volumetricCoeff,
    double timeInterval,
    double nodalVolume)
{
    density = itNode->FastGetSolutionStepValue(DENSITY);
    deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);

    // Non-Newtonian (Bingham-like) contribution, regularized a la Papanastasiou:
    // mu_eff = mu + (tau_y / gamma_dot) * (1 - exp(-m * gamma_dot))
    double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
    if (yieldShear > 0)
    {
        double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
        double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0)
        {
            deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
        {
            // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
            deviatoricCoeff = adaptiveExponent * yieldShear;
        }
    }

    volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    if (volumetricCoeff > 0)
    {
        // NOTE(review): the original code recomputed volumetricCoeff here with the
        // identical expression as above; that dead store has been removed.
        // The reduction below yields volumetricCoeff = density*nodalVolume/timeInterval,
        // i.e. the bulk term is capped by the nodal inertia scale — presumably a
        // conditioning measure; confirm against the PFEM formulation.
        double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff);
        volumetricCoeff *= bulkReduction;
    }
}
/**
 * @brief Fills the nodal material parameters used by the solid momentum assembly.
 *
 * Both output coefficients are scaled by the time step:
 *   deviatoricCoeff  = dt * mu,   with the shear modulus mu = E / (2*(1+nu));
 *   volumetricCoeff  = dt * K,    with the bulk term K = lambda + 2*mu/3,
 *                                 lambda = E*nu / ((1+nu)*(1-2*nu)).
 *
 * @param itNode           Iterator to the node whose properties are read.
 * @param density          [out] nodal SOLID_DENSITY.
 * @param deviatoricCoeff  [out] time-step-scaled shear (second Lame) coefficient.
 * @param volumetricCoeff  [out] time-step-scaled bulk coefficient.
 * @param timeInterval     current time step.
 * @param nodalVolume      nodal volume (unused here; kept for interface symmetry
 *                         with SetMaterialPropertiesToFluid).
 */
void SetMaterialPropertiesToSolid(
    ModelPart::NodeIterator itNode,
    double &density,
    double &deviatoricCoeff,
    double &volumetricCoeff,
    double timeInterval,
    double nodalVolume)
{
    density = itNode->FastGetSolutionStepValue(SOLID_DENSITY);

    const double E = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double nu = itNode->FastGetSolutionStepValue(POISSON_RATIO);

    // dt * shear modulus (second Lame parameter)
    deviatoricCoeff = timeInterval * E / (1.0 + nu) * 0.5;

    // dt * bulk modulus = dt * (first Lame parameter + 2/3 * shear modulus)
    volumetricCoeff = timeInterval * nu * E / ((1.0 + nu) * (1.0 - 2.0 * nu)) + 2.0 * deviatoricCoeff / 3.0;
}
/**
 * @brief Assembles the nodal momentum-equation contributions of SOLID nodes
 *        into the global system matrix A and RHS vector b.
 *
 * For every node flagged SOLID with a positive SOLID_NODAL_VOLUME and more than
 * one shape-function-derivative neighbour, a local LHS/RHS pair is built from:
 *   - dynamic (inertia) terms,
 *   - external (volume-acceleration) forces,
 *   - internal forces from the nodal Cauchy stress,
 * and assembled via the equation ids of the node and its neighbours.
 *
 * @param pScheme    Time scheme (only checked for non-null here).
 * @param rModelPart Model part providing nodes, process info and DELTA_TIME.
 * @param A          [in/out] global system matrix.
 * @param b          [in/out] global RHS vector.
 */
void BuildSolidNodally(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the system
LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType solidEquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
// Dimension is read from the first element's geometry (2 or 3 handled below).
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
const double FourThirds = 4.0 / 3.0;
const double nTwoThirds = -2.0 / 3.0;
//double theta = 0.5;
// Fully implicit time integration for the solid (cf. theta = 0.5 used for the fluid).
double theta=1.0;
array_1d<double, 3> Acc(3, 0.0);
// Shape-function derivatives of neighbour i (columns) and j (rows).
double dNdXi = 0;
double dNdYi = 0;
double dNdZi = 0;
double dNdXj = 0;
double dNdYj = 0;
double dNdZj = 0;
// Running offsets into the local matrix/vector: dimension dofs per neighbour.
unsigned int firstRow = 0;
unsigned int firstCol = 0;
double density = 0;
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
/* #pragma omp parallel */
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
if (itNode->Is(SOLID))
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
// const unsigned int neighSize = neighb_nodes.size()+1;
// neighSize counts the node itself plus its SFD neighbours (order vector).
const unsigned int neighSize = solidNodalSFDneighboursId.size();
const double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
if (neighSize > 1 && nodalVolume > 0)
{
// localSize = dimension * neighSize (velocity dofs of node + neighbours).
const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size();
if (solidLHS_Contribution.size1() != localSize)
solidLHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!!
if (solidRHS_Contribution.size() != localSize)
solidRHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!!
if (solidEquationId.size() != localSize)
solidEquationId.resize(localSize, false);
solidLHS_Contribution = ZeroMatrix(localSize, localSize);
solidRHS_Contribution = ZeroVector(localSize);
this->SetMaterialPropertiesToSolid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);
firstRow = 0;
firstCol = 0;
if (dimension == 2)
{
//////////////////////////// LHS TERMS //////////////////////////////
// Lumped inertia on the node's own dofs (factor 2/dt from the time scheme).
solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0);
solidRHS_Contribution[0] += -nodalVolume * density * Acc[0];
solidRHS_Contribution[1] += -nodalVolume * density * Acc[1];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
// double posX= itNode->X();
// double posY= itNode->Y();
// double coeffX =(12.0-24.0*posY)*pow(posX,4);
// coeffX += (-24.0+48.0*posY)*pow(posX,3);
// coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
// RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX;
// RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY;
solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
///////////////LOAD CONDITIONS FOR BELITSCHKO CASE
// if(itNode->X0()>24.999){
// // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge)
// // solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge)
// // solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge)
// // solidRHS_Contribution[1]+=40.0/9.0; // mesh 0.5 (8 element per edge)
// // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge)
// // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge)
// // solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge)
// }
//-------- INTERNAL FORCES TERM -------//
// 2D Voigt stress: [sigma_xx, sigma_yy, sigma_xy].
array_1d<double, 3> Sigma(3, 0.0);
Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1];
solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1];
// Stiffness-like viscous/volumetric coupling block (i,j), scaled by theta.
solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;
firstRow += 2;
}
firstRow = 0;
firstCol += 2;
unsigned int indexNode = i + 1;
// For interface nodes the neighbour ordering comes from
// SOLID_NODAL_SFD_NEIGHBOURS_ORDER, so the matching neighbour is searched by id.
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode];
// std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
// std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl;
if (neigh_nodes_id == other_neigh_nodes_id)
{
solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
break;
}
}
}
else if (i < neighb_nodes.size())
{
solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
}
}
/* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */
}
else if (dimension == 3)
{
//////////////////////////// LHS TERMS //////////////////////////////
solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
solidLHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0);
solidRHS_Contribution[0] += -nodalVolume * density * Acc[0];
solidRHS_Contribution[1] += -nodalVolume * density * Acc[1];
solidRHS_Contribution[2] += -nodalVolume * density * Acc[2];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
solidRHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];
///////////////LOAD CONDITIONS FOR BELITSCHKO CASE
// if(itNode->X0()>24.999){
// // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge)
// // solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge)
// // solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge)
// solidRHS_Contribution[1]+=40.0/27.0; // mesh 0.5 (8 element per edge, 2 per width)
// // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge)
// // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge)
// // solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge)
// }
//-------- INTERNAL FORCES TERM -------//
// 3D Voigt stress: [s_xx, s_yy, s_zz, s_xy, s_xz, s_yz] — presumed ordering
// from the index usage below; confirm against the stress-update utility.
array_1d<double, 6> Sigma(6, 0.0);
Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
// }
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
solidEquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1];
dNdZi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 2];
solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
solidRHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1];
dNdZj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 2];
solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
solidLHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;
firstRow += 3;
}
firstRow = 0;
firstCol += 3;
unsigned int indexNode = i + 1;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode];
// std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
// std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl;
if (neigh_nodes_id == other_neigh_nodes_id)
{
solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
solidEquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
break;
}
}
}
else if (i < neighb_nodes.size())
{
solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
solidEquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
}
// With OpenMP the lock array guards concurrent writes into A and b.
#ifdef _OPENMP
Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array);
#else
Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId);
#endif
}
}
}
// }
KRATOS_CATCH("")
}
void BuildFluidNodally(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
/* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
const double FourThirds = 4.0 / 3.0;
const double nTwoThirds = -2.0 / 3.0;
double theta = 0.5;
array_1d<double, 3> Acc(3, 0.0);
// array_1d<double,6> Sigma(6,0.0);
double pressure = 0;
double dNdXi = 0;
double dNdYi = 0;
double dNdZi = 0;
double dNdXj = 0;
double dNdYj = 0;
double dNdZj = 0;
unsigned int firstRow = 0;
unsigned int firstCol = 0;
double density = 0;
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
/* #pragma omp parallel */
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
// const unsigned int neighSize = neighb_nodes.size()+1;
const unsigned int neighSize = nodalSFDneighboursId.size();
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
if (neighSize > 1 && nodalVolume > 0)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
if (LHS_Contribution.size1() != localSize)
LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!!
if (RHS_Contribution.size() != localSize)
RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!!
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
LHS_Contribution = ZeroMatrix(localSize, localSize);
RHS_Contribution = ZeroVector(localSize);
this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// // std::cout<<"density,deviatoricCoeff,volumetricCoeff "<<density<<" "<<deviatoricCoeff<<" "<<volumetricCoeff<<std::endl;
// std::cout<<"INTERFACE nodalVolume "<<nodalVolume<<std::endl;
// }else{
// std::cout<<"nodalVolume "<<nodalVolume<<std::endl;
// }
firstRow = 0;
firstCol = 0;
if (dimension == 2)
{
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION, 0);
RHS_Contribution[0] += -nodalVolume * density * Acc[0];
RHS_Contribution[1] += -nodalVolume * density * Acc[1];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
// double posX= itNode->X();
// double posY= itNode->Y();
// double coeffX =(12.0-24.0*posY)*pow(posX,4);
// coeffX += (-24.0+48.0*posY)*pow(posX,3);
// coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
// RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX;
// RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY;
RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
//-------- INTERNAL FORCES TERM -------//
array_1d<double, 3> Sigma(3, 0.0);
Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
// }
if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
}
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;
firstRow += 2;
}
firstRow = 0;
firstCol += 2;
unsigned int indexNode = i + 1;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
// std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
// std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl;
if (neigh_nodes_id == other_neigh_nodes_id)
{
EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
break;
}
}
}
else if (i < neighb_nodes.size())
{
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
}
}
/* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */
}
else if (dimension == 3)
{
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION, 0);
RHS_Contribution[0] += -nodalVolume * density * Acc[0];
RHS_Contribution[1] += -nodalVolume * density * Acc[1];
RHS_Contribution[2] += -nodalVolume * density * Acc[2];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];
//-------- INTERNAL FORCES TERM -------//
array_1d<double, 6> Sigma(6, 0.0);
Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
// }
if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure;
}
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];
RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2];
LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;
firstRow += 3;
}
firstRow = 0;
firstCol += 3;
unsigned int indexNode = i + 1;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
// std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
// std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl;
if (neigh_nodes_id == other_neigh_nodes_id)
{
EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
break;
}
}
}
else if (i < neighb_nodes.size())
{
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
// }
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    // A zero-sized or all-zero RHS makes the solve trivial: the update is zero.
    const double rhs_norm = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (rhs_norm != 0.00)
    {
        // Delegate the actual solution of A * Dx = b to the configured linear solver.
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
    }

    // Report solver information when the echo level requests it.
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b,
    ModelPart &rModelPart)
{
    KRATOS_TRY

    // A zero-sized or all-zero RHS makes the solve trivial: the update is zero.
    const double rhs_norm = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (rhs_norm != 0.00)
    {
        // Some solvers (e.g. AMG with rigid-body modes) need extra physical data
        // extracted from the dof set / model part before solving.
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    // Report solver information when the echo level requests it (rank 0 only).
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safest function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
Timer::Start("Build");
// boost::timer m_build_time;
// Assemble the nodal contributions of the solid and fluid regions into A and b.
BuildSolidNodally(pScheme, rModelPart, A, b);
BuildFluidNodally(pScheme, rModelPart, A, b);
// std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl;
Timer::Stop("Build");
// ApplyPointLoads(pScheme,rModelPart,b);
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
// Time the solve phase separately from the build phase.
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
/* boost::timer m_solve_time; */
SystemSolveWithPhysics(A, Dx, b, rModelPart);
/* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
// Per-thread hash sets collect dof pointers without locking; they are merged below.
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
// NOTE(review): the element loop is currently serial (the pragma below is
// commented out), so only dofs_aux_list[ThisThread()] is actually filled here.
// #pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// Conditions contribute their dofs in parallel; each thread writes only its own set.
ConditionsArrayType &pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
//here we do a reduction in a tree so to have everything on thread 0
// Pairwise tree reduction: at each level, set i absorbs set i + new_max,
// halving the number of non-empty sets until only dofs_aux_list[0] remains.
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
// Transfer the merged, de-duplicated dof set into the sorted member container.
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(*it);
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an exception if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
// Rebuild the per-dof OpenMP lock array used during assembly: destroy any
// previous locks before resizing, then initialize one lock per dof.
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
    ModelPart &rModelPart) override
{
    // Number the equations: free dofs take consecutive ids from the front,
    // fixed dofs take the remaining ids from the back (in reverse traversal
    // order). Consequently an EquationId >= mEquationSystemSize identifies a
    // restrained degree of freedom.
    int next_free_id = 0;
    int next_fixed_id = BaseType::mDofSet.size();

    for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
    {
        if (dof_iterator->IsFixed())
            dof_iterator->SetEquationId(--next_fixed_id);
        else
            dof_iterator->SetEquationId(next_free_id++);
    }

    // Everything below this index is a free equation of the reduced system.
    BaseType::mEquationSystemSize = next_fixed_id;
}
//**************************************************************************
//**************************************************************************
// Allocates (if needed) and resizes the system matrix, solution vector, RHS
// vector and reactions vector, and (re)constructs the sparse matrix graph
// when the matrix shape is not yet valid.
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
// boost::timer m_contruct_matrix;
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
// First build (or forced reshape): size the matrix and build its graph.
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructureForFSI(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
// A size mismatch without the reshape flag means the dof set changed
// unexpectedly; KRATOS_ERROR aborts before the resize below is reached.
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructureForFSI(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true)
{
// Reactions are stored for the full dof set, not just the free equations.
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
// std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* inexpensive depending on the implementation chosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
// Intentionally empty: in this elimination-based builder the fixed dofs are
// numbered past mEquationSystemSize and never assembled, so no explicit
// modification of A, Dx or b is required here.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
// Release the dof set, the reactions vector and any solver-internal data so
// memory is freed between solution steps.
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
// No builder-and-solver specific checks are performed; always reports success.
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
// Assembles one local system (LHS matrix and RHS vector) into the global
// A and b, skipping fixed rows/columns (EquationId >= mEquationSystemSize).
// Under OpenMP each free row is protected by its per-dof lock so concurrent
// element contributions to the same row cannot race.
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
// Lock the whole global row while both b and the row of A are updated.
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
//**************************************************************************
// Builds the sparsity pattern of the system matrix A for the nodal FSI
// formulation: for every node it gathers the equation ids of the node and of
// its shape-function-derivative neighbours (solid and/or fluid sets), records
// the resulting row/column couplings in per-row hash sets, then converts those
// sets into a zero-valued boost::ublas CSR matrix.
virtual void ConstructMatrixStructureForFSI(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
// One hash set per free equation collects that row's column indices.
#ifdef USE_GOOGLE_HASH
std::vector<google::dense_hash_set<std::size_t>> indices(equation_size);
const std::size_t empty_key = 2 * equation_size + 10;
#else
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#endif
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
#ifdef USE_GOOGLE_HASH
indices[iii].set_empty_key(empty_key);
#else
indices[iii].reserve(40);
#endif
}
Element::EquationIdVectorType EquationId;
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
// --- SOLID nodes: gather equation ids from the solid neighbour list ---
if (itNode->Is(SOLID))
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
const unsigned int neighSize = nodalSFDneighboursId.size();
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol = 0;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
// The node's own velocity dofs occupy the first `dimension` slots.
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
// Interface node: neighbour slots follow the stored neighbour order,
// so each ordered id must be matched against the neighbour-node list.
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
unsigned int indexNode = i + 1;
if (indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
firstCol += dimension;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
{
EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
break;
}
}
}
}
}
else
{
// Non-interface node: neighbour slots follow the neighbour-node list directly.
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
firstCol += dimension;
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
{
EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
}
}
// --- FLUID (or interface) nodes: same gathering from the fluid neighbour list ---
if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
const unsigned int neighSize = nodalSFDneighboursId.size();
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol = 0;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
unsigned int indexNode = i + 1;
if (indexNode < neighSize)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
firstCol += dimension;
for (unsigned int k = 0; k < neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
{
EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
break;
}
}
}
}
}
else
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
firstCol += dimension;
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
{
EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
}
}
// Record the full coupling block of this node: every free id couples to
// every other free id in EquationId. Per-dof locks guard the row sets.
for (std::size_t i = 0; i < EquationId.size(); i++)
{
if (EquationId[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[EquationId[i]]);
#endif
auto &row_indices = indices[EquationId[i]];
for (auto it = EquationId.begin(); it != EquationId.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
}
}
}
// Conditions contribute their own couplings to the graph.
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii < nconditions; iii++)
{
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->Condition_EquationId(*(i_condition.base()), ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++)
{
if (ids[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[ids[i]]);
#endif
auto &row_indices = indices[ids[i]];
for (auto it = ids.begin(); it != ids.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[ids[i]]);
#endif
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
// Allocate the CSR matrix and fill its index arrays from the row sets.
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
// CSR requires sorted column indices within each row.
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
// Assembles a local LHS matrix into the global A, ignoring any row or column
// whose equation id belongs to a fixed dof (id >= mEquationSystemSize).
void AssembleLHS(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    const unsigned int n_local_dofs = LHS_Contribution.size1();

    for (unsigned int row = 0; row < n_local_dofs; row++)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // fixed row: nothing to assemble

        for (unsigned int col = 0; col < n_local_dofs; col++)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Appends `candidate` to `v` only if it is not already present (linear scan).
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
    for (std::vector<std::size_t>::const_iterator it = v.begin(); it != v.end(); ++it)
    {
        if (*it == candidate)
            return; // already stored, nothing to do
    }
    v.push_back(candidate);
}
// Assembles a local RHS vector into the global b. Free dofs always receive
// their contribution; fixed dofs are either skipped (reactions not requested)
// or accumulated into the reactions vector. Atomic updates keep concurrent
// assembly safe.
void AssembleRHS(
    TSystemVectorType &b,
    const LocalSystemVectorType &RHS_Contribution,
    const Element::EquationIdVectorType &EquationId)
{
    const unsigned int n_local_dofs = RHS_Contribution.size();
    const bool assemble_reactions = BaseType::mCalculateReactionsFlag;

    for (unsigned int i_local = 0; i_local < n_local_dofs; i_local++)
    {
        const unsigned int i_global = EquationId[i_local];

        if (i_global < BaseType::mEquationSystemSize)
        {
            // free dof: accumulate into the system RHS
            double &destination = b[i_global];
            const double &contribution = RHS_Contribution[i_local];
#pragma omp atomic
            destination += contribution;
        }
        else if (assemble_reactions)
        {
            // fixed dof: accumulate into the reactions vector, whose entries
            // are indexed past the free-equation range
            TSystemVectorType &reactions = *BaseType::mpReactionsVector;
            double &destination = reactions[i_global - BaseType::mEquationSystemSize];
            const double &contribution = RHS_Contribution[i_local];
#pragma omp atomic
            destination += contribution;
        }
    }
}
//**************************************************************************
// Assembles LHS_Contribution into A keeping COMPLETE rows for free dofs:
// only free rows (i_global < mEquationSystemSize) are written, but columns
// are not filtered, so couplings between free rows and fixed columns are
// retained (used e.g. for reaction computation).
void AssembleLHS_CompleteOnFreeRows(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        const unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                // Fix: j_global was declared (signed) int, inconsistent with
                // the unsigned equation ids used by every sibling assembler.
                const unsigned int j_global = EquationId[j_local];
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * NOTE: Y is normalized in place (classic GNU libc example behavior).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Carry whole seconds out of the microsecond field of y so that
   * x->tv_usec - y->tv_usec ends up in a non-negative, sub-second range. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Order-2, 3D 25-point stencil driver: allocates two time planes plus a
 * coefficient field, runs the stencil TESTS times, reports the best time.
 * Fixes vs. original: (1) Nx/Ny/Nz/Nt had no defaults, so running with few
 * arguments read uninitialized ints (UB); (2) roc2 was malloc'd twice and the
 * first allocation leaked; (3) initialization started at index 1 and never
 * touched A[1], while the stencil reads halo indices 0..3 and the previous
 * plane A[(t+1)%2] on the first step (uninitialized reads). */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults (16^3 interior + 2*4 halo, 4 time steps) when args are absent. */
  int Nx = 16+8, Ny = 16+8, Nz = 16+8, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time planes (Jacobi-style double buffer) + coefficient field. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize EVERY cell, including the 4-wide halo, and seed the second
   * time plane so the first stencil step reads only defined values. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-edge statistic holder: GetEdgeBackgroundCensus() results for the
  left/right/top/bottom edges.  GetEdgeBoundingBox() also reuses this struct
  ("vertex") as a counter of rows/columns trimmed from each side.
*/
typedef struct _EdgeInfo
{
double
left,
right,
top,
bottom;
} EdgeInfo;
/*
  GetEdgeBackgroundCensus() crops a width x height strip at the given offsets
  (gravity-adjusted) and returns the fraction [0.0 .. 1.0] of its pixels that
  do NOT fuzzy-match the background color.  The background color is seeded
  from the canvas corner selected by gravity and may be overridden by the
  "background" and "trim:background-color" artifacts.  Returns 0.0 when the
  crop fails.
*/
static double GetEdgeBackgroundCensus(const Image *image,
const CacheView *image_view,const GravityType gravity,const size_t width,
const size_t height,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
census;
Image
*edge_image;
PixelInfo
background,
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
/*
Determine the percent of image background for this edge.
*/
/* Pick the corner pixel that anchors the edge selected by gravity. */
switch (gravity)
{
case NorthWestGravity:
case NorthGravity:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
break;
}
case NorthEastGravity:
case EastGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
break;
}
case SouthEastGravity:
case SouthGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
break;
}
case SouthWestGravity:
case WestGravity:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
break;
}
}
/* NOTE(review): p is not checked for NULL before use here — presumably the
   1x1 virtual-pixel fetch cannot fail for a valid view; confirm upstream. */
GetPixelInfoPixel(image,p,&background);
/* Artifacts override the sampled corner color; the later artifact wins. */
artifact=GetImageArtifact(image,"background");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
artifact=GetImageArtifact(image,"trim:background-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
/* Crop the requested edge strip (gravity maps offsets to the right side). */
edge_geometry.width=width;
edge_geometry.height=height;
edge_geometry.x=x_offset;
edge_geometry.y=y_offset;
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
return(0.0);
/* Count strip pixels that differ (fuzzy) from the background color. */
census=0.0;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
census++;
p+=GetPixelChannels(edge_image);
}
}
/* Normalize the count to a fraction of the strip's pixel total. */
census/=((double) edge_image->columns*edge_image->rows);
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
return(census);
}
/* Returns the smallest census value among the four edges. */
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double minimum;

  minimum=MagickMin(edge->left,edge->right);
  minimum=MagickMin(minimum,edge->top);
  minimum=MagickMin(minimum,edge->bottom);
  return(minimum);
}
/*
  GetEdgeBoundingBox() computes a trim bounding box by repeatedly shaving off
  the edge whose non-background census is currently the smallest, until every
  edge's census reaches the threshold derived from the
  "trim:percent-background" artifact (with the default threshold of
  MagickEpsilon, only edges that are essentially all background are trimmed).
  "vertex" accumulates how many columns/rows were trimmed from each side.
*/
static RectangleInfo GetEdgeBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
background_census,
percent_background;
EdgeInfo
edge,
vertex;
Image
*edge_image;
RectangleInfo
bounds;
/*
Get the image bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
SetGeometry(image,&bounds);
/* Work on a clone with a reset page so crops use canvas coordinates. */
edge_image=CloneImage(image,0,0,MagickTrue,exception);
if (edge_image == (Image *) NULL)
return(bounds);
(void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
(void) memset(&vertex,0,sizeof(vertex));
edge_view=AcquireVirtualCacheView(edge_image,exception);
/* Initial census of each full edge (1-pixel-wide strips). */
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
1,0,0,0,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
1,0,0,0,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
0,1,0,0,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
0,1,0,0,exception);
/* Map the artifact (percent background, default 100%) to a census
   threshold in [MagickEpsilon, 1.0]: threshold = clamp(1 - pct/100). */
percent_background=1.0;
artifact=GetImageArtifact(edge_image,"trim:percent-background");
if (artifact != (const char *) NULL)
percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
1.0);
/* Iteratively trim whichever edge currently has the minimum census. */
background_census=GetMinEdgeBackgroundCensus(&edge);
for ( ; background_census < percent_background;
background_census=GetMinEdgeBackgroundCensus(&edge))
{
if ((bounds.width == 0) || (bounds.height == 0))
break;
if (fabs(edge.left-background_census) < MagickEpsilon)
{
/*
Trim left edge.
*/
vertex.left++;
bounds.width--;
/* Recompute only the censuses the trim invalidated. */
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.right-background_census) < MagickEpsilon)
{
/*
Trim right edge.
*/
vertex.right++;
bounds.width--;
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.top-background_census) < MagickEpsilon)
{
/*
Trim top edge.
*/
vertex.top++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
continue;
}
if (fabs(edge.bottom-background_census) < MagickEpsilon)
{
/*
Trim bottom edge.
*/
vertex.bottom++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
/* Trimmed-column/row counts become the box origin. */
bounds.x=(ssize_t) vertex.left;
bounds.y=(ssize_t) vertex.top;
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return(bounds);
}
/*
  GetImageBoundingBox() scans the canvas for the smallest rectangle enclosing
  pixels that differ (fuzzy) from the four corner colors.  Delegates to
  GetEdgeBoundingBox() when "trim:percent-background" is set; "trim:edges"
  restricts which sides participate.  During the scan, bounds.x/.y hold the
  minimum foreground coordinates and bounds.width/.height the maximum ones;
  they are converted to a true extent at the end.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
PixelInfo
target[4],
zero;
RectangleInfo
bounds;
const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"trim:percent-background");
if (artifact != (const char *) NULL)
return(GetEdgeBoundingBox(image,exception));
artifact=GetImageArtifact(image, "trim:edges");
if (artifact == (const char *) NULL)
{
/* Seed with an "inverted" box (min coords at max, max coords at min)
   so any foreground pixel shrinks/grows it correctly. */
bounds.width=image->columns == 1 ? 1 : 0;
bounds.height=image->rows == 1 ? 1 : 0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
}
else
{
char
*edges,
*p,
*q;
/* Start from the full canvas; invert only the sides listed in the
   comma-separated "trim:edges" artifact so only those get trimmed. */
bounds.width=(size_t) image->columns;
bounds.height=(size_t) image->rows;
bounds.x=0;
bounds.y=0;
edges=AcquireString(artifact);
q=edges;
while ((p=StringToken(",",&q)) != (char *) NULL)
{
if (LocaleCompare(p,"north") == 0)
bounds.y=(ssize_t) image->rows;
if (LocaleCompare(p,"east") == 0)
bounds.width=0;
if (LocaleCompare(p,"south") == 0)
bounds.height=0;
if (LocaleCompare(p,"west") == 0)
bounds.x=(ssize_t) image->columns;
}
edges=DestroyString(edges);
}
/* Sample the four corner colors as the per-side background references:
   target[0]=NW, target[1]=NE, target[2]=SW, target[3]=SE. */
GetPixelInfo(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
GetPixelInfoPixel(image,p,&target[0]);
GetPixelInfo(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[1]);
GetPixelInfo(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[2]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
image->rows-1,1,1,exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[3]);
status=MagickTrue;
GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
/* Snapshot the shared bounds under the same critical section used for
   the merge below, then refine a thread-local copy for this row. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,p,&pixel);
if ((x < bounding_box.x) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
if ((x < (ssize_t) bounding_box.width) &&
(y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
{
bounding_box.width=(size_t) x;
bounding_box.height=(size_t) y;
}
p+=GetPixelChannels(image);
}
/* Merge this row's box into the shared result. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
/* Convert max coordinates to extents relative to the box origin. */
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o n v e x H u l l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageConvexHull() returns the convex hull points of an image canvas.
%
% The format of the GetImageConvexHull method is:
%
% PointInfo *GetImageConvexHull(const Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the convex hull.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  /*
    2-D cross product of (b-a) and (c-a): positive for a counter-clockwise
    turn, negative for clockwise, zero for collinear points.
  */
  double
    ab_x,
    ab_y,
    ac_x,
    ac_y;

  ab_x=b->x-a->x;
  ab_y=b->y-a->y;
  ac_x=c->x-a->x;
  ac_y=c->y-a->y;
  return(ab_x*ac_y-ab_y*ac_x);
}
/*
  GetEdgeBackgroundColor() picks a background color for the canvas.  For each
  of the four edges it forms a candidate color (a corner pixel, or the
  "convex-hull:background-color"/"background" artifact when set) and counts
  the edge pixels that do NOT fuzzy-match that candidate; the candidate with
  the highest such count is returned.
*/
static PixelInfo GetEdgeBackgroundColor(const Image *image,
const CacheView *image_view,ExceptionInfo *exception)
{
const char
*artifact;
double
census[4],
edge_census;
PixelInfo
background[4],
edge_background;
ssize_t
i;
/*
Most dominant color of edges/corners is the background color of the image.
*/
artifact=GetImageArtifact(image,"convex-hull:background-color");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"background");
/* The four edges are censused independently, one per loop iteration. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i < 4; i++)
{
CacheView
*edge_view;
GravityType
gravity;
Image
*edge_image;
PixelInfo
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
census[i]=0.0;
(void) memset(&edge_geometry,0,sizeof(edge_geometry));
/* Select the corner sample and 1-pixel-wide strip for edge i:
   0=west, 1=east, 2=north, 3=south. */
switch (i)
{
case 0:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
gravity=WestGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 1:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
gravity=EastGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 2:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
gravity=NorthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
case 3:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
gravity=SouthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
}
GetPixelInfoPixel(image,p,background+i);
/* Artifact, when present, replaces the sampled candidate for every edge. */
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,background+i,
exception);
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
continue;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
census[i]++;
p+=GetPixelChannels(edge_image);
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
}
/* Return the candidate with the largest census (ties go to the first). */
edge_census=(-1.0);
for (i=0; i < 4; i++)
if (census[i] > edge_census)
{
edge_background=background[i];
edge_census=census[i];
}
return(edge_background);
}
/*
  TraceConvexHull() builds the convex hull of "vertices" with Andrew's
  monotone-chain algorithm, writing pointers into the caller-provided
  *monotone_chain (which must hold at least 2*number_vertices entries) and
  the hull length into *chain_length.  Assumes the input points are already
  in sorted scan order, as produced by GetImageConvexHull() — TODO confirm
  for other callers.
*/
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
PointInfo ***monotone_chain,size_t *chain_length)
{
PointInfo
**chain;
ssize_t
i;
size_t
demark,
n;
/*
Construct the upper and lower hulls: rightmost to leftmost counterclockwise.
*/
chain=(*monotone_chain);
n=0;
/* First hull: drop interior points (non-left turns) as we sweep forward. */
for (i=0; i < (ssize_t) number_vertices; i++)
{
while ((n >= 2) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
/* demark protects the first hull from being popped by the second sweep. */
demark=n+1;
for (i=(ssize_t) number_vertices-2; i >= 0; i--)
{
while ((n >= demark) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
*chain_length=n;
}
/*
  GetImageConvexHull() collects every pixel that differs (fuzzy) from the
  edge background color as a vertex, traces their convex hull, and returns a
  newly allocated PointInfo array (caller frees); *number_vertices receives
  its length.  Returns NULL on allocation failure.
*/
MagickExport PointInfo *GetImageConvexHull(const Image *image,
size_t *number_vertices,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MemoryInfo
*monotone_info,
*vertices_info;
PixelInfo
background;
PointInfo
*convex_hull,
**monotone_chain,
*vertices;
size_t
n;
ssize_t
y;
/*
Identify convex hull vertices of image foreground object(s).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*number_vertices=0;
/* Worst case: every pixel is a vertex; the chain needs twice that. */
vertices_info=AcquireVirtualMemory(image->columns,image->rows*
sizeof(*vertices));
monotone_info=AcquireVirtualMemory(2*image->columns,2*
image->rows*sizeof(*monotone_chain));
if ((vertices_info == (MemoryInfo *) NULL) ||
(monotone_info == (MemoryInfo *) NULL))
{
if (monotone_info != (MemoryInfo *) NULL)
monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
if (vertices_info != (MemoryInfo *) NULL)
vertices_info=RelinquishVirtualMemory(vertices_info);
return((PointInfo *) NULL);
}
vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
image_view=AcquireVirtualCacheView(image,exception);
background=GetEdgeBackgroundColor(image,image_view,exception);
status=MagickTrue;
n=0;
/* Scan in row-major order; this yields vertices sorted by (y,x). */
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
{
vertices[n].x=(double) x;
vertices[n].y=(double) y;
n++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Return the convex hull of the image foreground object(s).
*/
TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
sizeof(*convex_hull));
if (convex_hull != (PointInfo *) NULL)
for (n=0; n < *number_vertices; n++)
convex_hull[n]=(*monotone_chain[n]);
monotone_info=RelinquishVirtualMemory(monotone_info);
vertices_info=RelinquishVirtualMemory(vertices_info);
return(convex_hull);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDepth() returns the smallest bit depth (1 .. MAGICKCORE_QUANTUM_DEPTH)
  at which every updatable channel value round-trips losslessly.  Three paths:
  (1) opaque PseudoClass images inspect only the colormap; (2) when the
  quantum range fits in MaxMap, a precomputed per-value depth map speeds the
  pixel scan; (3) otherwise each pixel value is probed directly.  Per-thread
  maxima in current_depth[] are reduced at the end; "status" short-circuits
  rows once any thread hits the maximum depth.
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* One running maximum per worker thread, merged after the scan. */
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
/*
Colormap path: probing the palette entries is sufficient for opaque
PseudoClass images.  Raise the depth until R/G/B of entry i all
round-trip at that depth.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
/* Precompute, for every possible quantum value, the minimum depth at
   which it round-trips; the pixel scan then becomes a table lookup. */
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
/* Once a thread reaches max depth the answer cannot grow; stop early. */
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* Raise this thread's depth until the value round-trips. */
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
/* Reduce per-thread maxima to the final depth. */
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M i n i m u m B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMinimumBoundingBox() returns the points that form the minimum
% bounding box around the image foreground objects with the "Rotating
% Calipers" algorithm. The method also returns these properties:
% minimum-bounding-box:area, minimum-bounding-box:width,
% minimum-bounding-box:height, and minimum-bounding-box:angle.
%
% The format of the GetImageMinimumBoundingBox method is:
%
% PointInfo *GetImageMinimumBoundingBox(Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the bounding box.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _CaliperInfo
{
double
area,
width,
height,
projection;
ssize_t
p,
q,
v;
} CaliperInfo;
static inline double getAngle(PointInfo *p,PointInfo *q)
{
  double
    delta_x,
    delta_y;

  /*
    Angle of the directed line from p to q relative to the horizontal
    axis, expressed in degrees.
  */
  delta_x=q->x-p->x;
  delta_y=q->y-p->y;
  return(RadiansToDegrees(atan2(delta_y,delta_x)));
}
static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    d;

  /*
    Squared Euclidean distance between p and q.  Note: despite the
    name this is the SQUARED distance; callers apply sqrt() where the
    plain distance is needed.
  */
  d=hypot(p->x-q->x,p->y-q->y);
  return(d*d);
}
static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    squared_length;

  /*
    Scalar projection of the vector (v-p) onto the direction of the
    line through p and q; coincident endpoints (degenerate line)
    yield INFINITY.
  */
  squared_length=getDistance(p,q);
  if (squared_length < MagickEpsilon)
    return(INFINITY);
  return(((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/
    sqrt(squared_length));
}
static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    squared_length;

  /*
    Signed perpendicular distance from point v to the line through p
    and q (2-D cross product divided by the segment length); a
    degenerate segment yields INFINITY.
  */
  squared_length=getDistance(p,q);
  if (squared_length < MagickEpsilon)
    return(INFINITY);
  return(((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/
    sqrt(squared_length));
}
MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
    Returns 4 vertices (caller frees), or NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  /*
    Candidate boxes rest on edges of the convex hull of the foreground.
  */
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /*
    Seed the best-so-far caliper with an area no valid box can reach
    (twice the image area) so the first candidate always wins.
  */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    /*
      For hull edge (i,i+1): find the hull vertex farthest from the
      edge; that distance is the height of the caliper box resting on
      this edge.  NOTE(review): despite its name, min_diameter tracks
      the MAXIMUM distance seen for this edge.
    */
    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      double
        diameter;

      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    /*
      Project every hull vertex onto the edge direction; the spread of
      projections is the width of the caliper box along this edge.
    */
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    if (caliper_info.area > area)
      {
        /*
          Smallest box so far: remember its geometry and the hull
          indices that produced it.
        */
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  /*
    Walk the box perimeter: corner 0 lies at the projection extent
    along the edge direction; the remaining corners follow at right
    angles, rounded to the nearest integer coordinate.
  */
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double d = hypot(bounding_box[i].x,bounding_box[i].y);

    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin:
        first translate the box so its minimum x/y touch the origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        /*
          NOTE(review): the intercept mixes corner (i+1)'s y with
          corner i's x; verify this matches the intended edge
          equation before relying on the distance value.
        */
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      /*
        Bias the reported angle toward the landscape or portrait
        orientation requested via the image artifact.
      */
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded up to the
% nearest legal quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal_depth[4] = { 8, 16, 32, 64 };

  size_t
    depth,
    j;

  /*
    Round the image depth up to the nearest legal quantum depth; a
    depth above 64 is returned unchanged.  When constrain is set the
    result is clamped to the compiled-in MAGICKCORE_QUANTUM_DEPTH.
  */
  depth=image->depth;
  for (j=0; j < 4; j++)
    if (depth <= legal_depth[j])
      {
        depth=legal_depth[j];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    Classify the image from its cached state: color separation (CMYK),
    bi-level, grayscale, palette, or true color, each with or without
    an alpha channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel: return BilevelType when all pixels are pure
    black/white, GrayscaleType (or GrayscaleAlphaType when an alpha
    channel is present) when all pixels are gray, and UndefinedType
    otherwise.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust the cached image type when it already claims gray/bi-level;
    skip the per-pixel scan.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;  /* optimistic; demoted as pixels disprove it */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* any non-gray pixel settles the answer: not gray at all */
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      /* gray but not pure black/white demotes bi-level to grayscale */
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  const Quantum
    *p;

  ssize_t
    x,
    y;

  /*
    Scan the pixels; the image is monochrome only when every pixel is
    gray with an intensity of either 0 or QuantumRange.  The cached
    BilevelType short-circuits the scan.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  monochrome=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (monochrome != MagickFalse); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(monochrome);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    gray_type;

  /*
    Classify the image by inspecting its pixels (unlike GetImageType,
    which trusts cached state): color separation (CMYK), bi-level or
    grayscale, palette, or true color, each with or without alpha.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType :
      ColorSeparationAlphaType);
  gray_type=IdentifyImageGray(image,exception);
  switch (gray_type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(gray_type);
    default:
      break;
  }
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(image->alpha_trait == UndefinedPixelTrait ? PaletteType :
      PaletteAlphaType);
  return(image->alpha_trait == UndefinedPixelTrait ? TrueColorType :
    TrueColorAlphaType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    Report whether the image's cached type marks it as grayscale or
    bi-level; no pixel data is inspected.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    Report whether the image's cached type marks it as bi-level; no
    pixel data is inspected.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return MagickTrue immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque: MagickTrue when no pixel has an
    alpha value other than OpaqueAlpha.  Images with no alpha channel
    are trivially opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* first translucent pixel ends the scan */
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    /* the inner loop only stops early when a translucent pixel was hit */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* y reached image->rows only if every row scanned clean (or a cache
     read failed, which also terminates the outer loop early) */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  /*
    Reduce the effective depth of every updatable channel by
    round-tripping each quantum through the reduced range; also updates
    the colormap for PseudoClass images and sets image->depth on
    success.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A depth at or above the compiled-in quantum depth requires no pixel
    changes; just record it.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Quantize the colormap entries of a palette image in place.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Fast path (integer quantum only): when the full quantum range fits
    in MaxMap, precompute a lookup table of quantized values and apply
    it to every pixel.
  */
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;  /* cannot break out of an OpenMP loop */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  /*
    Convert the image to the requested type by transforming its
    colorspace, storage class, alpha trait, and (for palette types)
    quantizing its colors.  On success image->type is updated.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* the "dither" artifact overrides the image's dither setting */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /*
        Gray, normalize, quantize to 2 colors with dithering, then
        stretch to full black/white.  NOTE(review): status is
        overwritten at each step, so only the last step's result is
        reported.
      */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->dither_method=FloydSteinbergDitherMethod;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      status=MinMaxStretchImage(image,0.0,0.0,1.0,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* quantize only when the image is not already a small palette */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      /*
        Threshold the alpha channel to on/off (restricted via the
        channel mask), then quantize the colors.
      */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;  /* no transformation; only the cached type is recorded */
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  image->type=type;
  return(MagickTrue);
}
|
2-2.c | #include <omp.h>
#include <stdio.h>
int main() {
  /* Spawn a two-thread parallel region; the `master` construct binds to the
     for statement, so only the master thread (thread 0) executes the loop —
     the output is therefore "T0:i0 T0:i1 ..." printed once, not twice. */
#pragma omp parallel num_threads(2)
#pragma omp master
  for (int i = 0; i < 100; i++) {
    int id = omp_get_thread_num();  /* always 0 inside the master construct */
    printf("T%d:i%d ", id, i);
    fflush(stdout);  /* flush each token so output ordering is observable */
  }
}
|
cpd.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>
#ifdef HIPARTI_USE_OPENMP
#include <omp.h>
#endif
#include "../src/sptensor/hicoo/hicoo.h"
void print_usage(char ** argv) {
printf("Usage: %s [options] \n\n", argv[0]);
printf("Options: -i INPUT, --input=INPUT\n");
printf(" -o OUTPUT, --output=OUTPUT\n");
printf(" -e RENUMBER, --renumber=RENUMBER\n");
printf(" -n NITERS_RENUM\n");
printf(" -p IMPL_NUM, --impl-num=IMPL_NUM\n");
printf(" -d CUDA_DEV_ID, --cuda-dev-id=DEV_ID\n");
printf(" -r RANK\n");
printf(" -t NTHREADS, --nt=NT\n");
printf(" -u use_reduce, --ur=use_reduce\n");
printf(" --help\n");
printf("\n");
}
/*
 * Driver for sparse CPD-ALS (CANDECOMP/PARAFAC decomposition via alternating
 * least squares) on a COO sparse tensor, with optional index renumbering
 * (Lexi-order, BFS-like, or random) applied before the decomposition.
 *
 * Returns 0 on success; exits with status 1 on bad usage or load failure.
 */
int main(int argc, char ** argv) {
    char ifname[1000];
    FILE *fo = NULL;
    ptiSparseTensor X;
    ptiIndex R = 16;
    ptiIndex niters = 1; //5; // 50
    double tol = 1e-5;
    ptiKruskalTensor ktensor;
    int cuda_dev_id = -2;      // -2: sequential CPU, -1: OpenMP CPU
    int nthreads = 1;
    int use_reduce = 0;
    int impl_num = 0;
    int renumber = 0;
    int niters_renum = 3;
    /* renumber:
     * = 0 : no renumbering.
     * = 1 : renumber with Lexi-order
     * = 2 : renumber with BFS-like
     * = 3 : randomly renumbering, specify niters_renum.
     */

    ifname[0] = '\0';          // so a missing -i option can be detected below

    if(argc < 2) {
        print_usage(argv);
        exit(1);
    }

    int c;
    for(;;) {
        static struct option long_options[] = {
            {"input", required_argument, 0, 'i'},
            {"output", optional_argument, 0, 'o'},
            {"impl-num", optional_argument, 0, 'p'},
            {"renumber", optional_argument, 0, 'e'},
            {"niters-renum", optional_argument, 0, 'n'},
            {"cuda-dev-id", optional_argument, 0, 'd'},
            {"rank", optional_argument, 0, 'r'},
            {"nt", optional_argument, 0, 't'},
            {"use-reduce", optional_argument, 0, 'u'},
            {"help", no_argument, 0, 0},
            {0, 0, 0, 0}
        };
        int option_index = 0;
        c = getopt_long(argc, argv, "i:o:p:e:n:d:r:t:u:", long_options, &option_index);
        if(c == -1) {
            break;
        }
        switch(c) {
        case 'i':
            /* BUG FIX: bounded copy instead of strcpy(); optarg may exceed
             * sizeof(ifname). snprintf always NUL-terminates. */
            snprintf(ifname, sizeof ifname, "%s", optarg);
            printf("Input file: %s\n", ifname); fflush(stdout);
            break;
        case 'o':
            fo = fopen(optarg, "w");
            ptiAssert(fo != NULL);
            printf("output file: %s\n", optarg); fflush(stdout);
            break;
        case 'p':
            sscanf(optarg, "%d", &impl_num);
            break;
        case 'e':
            sscanf(optarg, "%d", &renumber);
            break;
        case 'n':
            sscanf(optarg, "%d", &niters_renum);
            break;
        case 'd':
            sscanf(optarg, "%d", &cuda_dev_id);
            break;
        case 'r':
            /* BUG FIX: was "%u"HIPARTI_SCN_INDEX, which expands to "%uu" and
             * demands a literal 'u' after the number. The scan macro already
             * carries the conversion letter, so only "%" must be prepended. */
            sscanf(optarg, "%"HIPARTI_SCN_INDEX, &R);
            break;
        case 'u':
            sscanf(optarg, "%d", &use_reduce);
            break;
        case 't':
            sscanf(optarg, "%d", &nthreads);
            break;
        case 0:
            /* Long option with val == 0 (only --help): exit successfully. */
            print_usage(argv);
            exit(0);
        case '?': /* invalid option */
        case 'h':
        default:
            print_usage(argv);
            exit(1);
        }
    }

    /* BUG FIX: ifname was read uninitialized when -i was never supplied. */
    if (ifname[0] == '\0') {
        fprintf(stderr, "Error: no input tensor given (use -i INPUT).\n");
        print_usage(argv);
        exit(1);
    }

    printf("cuda_dev_id: %d\n", cuda_dev_id);
    printf("renumber: %d\n", renumber);
    if (renumber == 1)
        printf("niters_renum: %d\n\n", niters_renum);

    ptiAssert(ptiLoadSparseTensor(&X, 1, ifname) == 0);
    ptiSparseTensorStatus(&X, stdout);
    // ptiDumpSparseTensor(&X, 0, stdout);

    /* Renumber the input tensor (map_inds[m][i] maps old index i of mode m) */
    ptiIndex ** map_inds = NULL;
    if (renumber > 0) {
        map_inds = (ptiIndex **)malloc(X.nmodes * sizeof *map_inds);
        pti_CheckOSError(!map_inds, "MTTKRP HiCOO");
        for(ptiIndex m = 0; m < X.nmodes; ++m) {
            map_inds[m] = (ptiIndex *)malloc(X.ndims[m] * sizeof (ptiIndex));
            pti_CheckError(!map_inds[m], "MTTKRP HiCOO", NULL);
            for(ptiIndex i = 0; i < X.ndims[m]; ++i)
                map_inds[m][i] = i;   // start from the identity permutation
        }

        ptiTimer renumber_timer;
        ptiNewTimer(&renumber_timer, 0);
        ptiStartTimer(renumber_timer);

        if ( renumber == 1 || renumber == 2) { /* Set the Lexi-order or BFS-like renumbering */
#if 0
            orderit(&X, map_inds, renumber, niters_renum);
#else
            // Fix sb_bits = 7
            ptiIndexRenumber(&X, map_inds, renumber, niters_renum, 7, nthreads, impl_num);
#endif
            // orderforHiCOO((int)(X.nmodes), (ptiIndex)X.nnz, X.ndims, X.inds, map_inds);
        }
        if ( renumber == 3) { /* Set randomly renumbering */
            printf("[Random Indexing]\n");
            ptiGetRandomShuffledIndices(&X, map_inds);
        }
        // fflush(stdout);

        ptiStopTimer(renumber_timer);
        ptiPrintElapsedTime(renumber_timer, "Renumbering");
        ptiFreeTimer(renumber_timer);

        /* Apply the permutation to the tensor indices */
        ptiTimer shuffle_timer;
        ptiNewTimer(&shuffle_timer, 0);
        ptiStartTimer(shuffle_timer);
        ptiSparseTensorShuffleIndices(&X, map_inds);
        ptiStopTimer(shuffle_timer);
        ptiPrintElapsedTime(shuffle_timer, "Shuffling time");
        ptiFreeTimer(shuffle_timer);
        printf("\n");

        // ptiSparseTensorSortIndex(&X, 1);
        // printf("map_inds:\n");
        // for(ptiIndex m = 0; m < X.nmodes; ++m) {
        //     ptiDumpIndexArray(map_inds[m], X.ndims[m], stdout);
        // }
        // ptiAssert(ptiDumpSparseTensor(&X, 0, stdout) == 0);
    }

    ptiIndex nmodes = X.nmodes;
    ptiNewKruskalTensor(&ktensor, nmodes, X.ndims, R);

    /* For warm-up caches, timing not included */
    if(cuda_dev_id == -2) {
        nthreads = 1;
        ptiAssert(ptiCpdAls(&X, R, niters, tol, &ktensor) == 0);
    } else if(cuda_dev_id == -1) {
        omp_set_num_threads(nthreads);
        #pragma omp parallel
        {
            nthreads = omp_get_num_threads();
        }
        printf("nthreads: %d\n", nthreads);
        printf("use_reduce: %d\n", use_reduce);
        ptiAssert(ptiOmpCpdAls(&X, R, niters, tol, nthreads, use_reduce, &ktensor) == 0);
    }

    if(fo != NULL) {
        // Dump ktensor to files; undo the renumbering first so the output
        // is expressed in the original index space.
        if (renumber > 0) {
            ptiKruskalTensorInverseShuffleIndices(&ktensor, map_inds);
        }
        ptiAssert( ptiDumpKruskalTensor(&ktensor, fo) == 0 );
        fclose(fo);
    }

    if (renumber > 0) {
        for(ptiIndex m = 0; m < X.nmodes; ++m) {
            free(map_inds[m]);
        }
        free(map_inds);
    }
    ptiFreeSparseTensor(&X);
    ptiFreeKruskalTensor(&ktensor);
    return 0;
}
|
GB_unop__minv_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_fp32_fp32)
// op(A') function: GB (_unop_tran__minv_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = (1.0F)/z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1/Ax [p] for every present entry. Cx and Ax may be aliased; the
// operation is elementwise in place, so aliasing is safe.
GrB_Info GB (_unop_apply__minv_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (1.0F) / Ax [p] ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (1.0F) / Ax [p] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A, typecast, and apply 1/x to each entry.
// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, which expands the GB_* macros defined above
// (GB_CAST_OP, GB_ATYPE, GB_CTYPE, ...) for this type combination.
GrB_Info GB (_unop_tran__minv_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace row counts/offsets
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/* Schwarz-type prescreening for (LL|LL) relativistic integrals.
 * Returns nonzero when the shell quartet (i,j|k,l) can contribute above
 * direct_scf_cutoff given the density-matrix bounds, zero when it may be
 * skipped. atm/bas/env are part of the callback signature but unused here. */
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;  // no optimizer attached: never screen anything out
        }
        const int n = opt->nbas;
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n && j < n && k < n && l < n);
        /* |(ij|kl)| <= q_cond[ij] * q_cond[kl] (Cauchy-Schwarz bound) */
        const double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        if (!(qijkl > opt->direct_scf_cutoff)) {
                return 0;
        }
        /* Keep the quartet if any coupled density block is large enough */
        const double dmin = opt->direct_scf_cutoff / qijkl;
        const double *dmc = opt->dm_cond;
        return (dmc[j*n+i] > dmin)
            || (dmc[l*n+k] > dmin)
            || (dmc[j*n+k] > dmin)
            || (dmc[j*n+l] > dmin)
            || (dmc[i*n+k] > dmin)
            || (dmc[i*n+l] > dmin);
}
/* Fill dms_cond with per-density screening tables for the (LL|LL) case and
 * report the smallest density value that can still matter (*dm_atleast).
 * J and K entries of each pair point at the same table, because
 * _vhf.rdirect_mapdm uses one DM for both. Always returns 1. */
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        const int nbas = opt->nbas;
        const double qijkl = opt->q_cond[shls[0]*nbas+shls[1]]
                           * opt->q_cond[shls[2]*nbas+shls[3]];
        /* per-DM tables start right after the global dm_cond block */
        double *pdmscond = opt->dm_cond + nbas*nbas;
        const int npair = (n_dm + 1) / 2;
        for (int idm = 0; idm < npair; idm++) {
                double *table = pdmscond + idm*nbas*nbas;
                dms_cond[idm*2+0] = table;  // for vj
                dms_cond[idm*2+1] = table;  // for vk
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/* Schwarz-type prescreening for mixed (SS|LL) integrals. The bra uses the
 * small-small q_cond block (offset n*n*SS), the ket the large-large block.
 * Returns nonzero when the quartet must be computed. */
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;  // no optimizer attached: never screen anything out
        }
        const int n = opt->nbas;
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n && j < n && k < n && l < n);
        const double *dmsl = opt->dm_cond + n*n*SL;
        const double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
        if (!(qijkl > opt->direct_scf_cutoff)) {
                return 0;
        }
        const double dmin = opt->direct_scf_cutoff / qijkl;
        /* SS block couples (j,i); LL block couples (l,k); SL couples the rest */
        return (opt->dm_cond[n*n*SS+j*n+i] > dmin)
            || (opt->dm_cond[l*n+k] > dmin)
            || (dmsl[j*n+k] > dmin)
            || (dmsl[j*n+l] > dmin)
            || (dmsl[i*n+k] > dmin)
            || (dmsl[i*n+l] > dmin);
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
/* Hand back the (dmll, dmss, dmsl) screening tables for the SS|LL driver.
 * The ordering must match _call_veff_ssll in dhf.py. The 4 global dm_cond
 * blocks come first; the per-set tables follow at offset 4*nbas*nbas.
 * Always returns 1. */
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        const int nbas = opt->nbas;
        const int nn = nbas * nbas;
        const double qijkl = opt->q_cond[nn*SS + shls[0]*nbas + shls[1]]
                           * opt->q_cond[shls[2]*nbas + shls[3]];
        double *pdmscond = opt->dm_cond + 4*nn;
        const int nset = (n_dm + 2) / 3;
        for (int idm = 0; idm < nset; idm++) {
                dms_cond[nset*0+idm] = pdmscond + (nset*LL + idm)*nn;  // dmll
                dms_cond[nset*1+idm] = pdmscond + (nset*SS + idm)*nn;  // dmss
                dms_cond[nset*2+idm] = pdmscond + (nset*SL + idm)*nn;  // dmsl
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/* Fill the symmetric nbas x nbas table qcond with Schwarz bounds:
 * qcond[ish,jsh] = sqrt(max |(ij|ij)|) over the functions of the shell pair,
 * floored at 1e-100 so later divisions by qcond never hit zero.
 * The integrals are evaluated with the spinor integral driver `intor`.
 * Work is distributed over the lower-triangular pair index ij with OpenMP. */
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
                      int *ao_loc, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        int shls_slice[] = {0, nbas};
        /* NOTE(review): cache_size is not in the shared() list; being
         * const-qualified it is predetermined shared under default(none). */
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, cintopt, qcond, ao_loc, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int i, j, ij, di, dj, ish, jsh;
        int shls[4];
        /* per-thread scratch for the integral library */
        double *cache = malloc(sizeof(double) * cache_size);
        /* di := largest shell dimension, so buf can hold any (ij|ij) batch */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* decode the flattened lower-triangular index: ij = ish*(ish+1)/2 + jsh */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                qtmp = 1e-100;   /* floor: keeps qcond strictly positive */
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        /* scan the "diagonal" elements (ij|ij) of the batch */
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                qcond[ish*nbas+jsh] = qtmp;
                qcond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/* (Re)build the Schwarz bound table q_cond for the (LL|LL) class.
 * Only the plain spinor ERI driver int2e_spinor is supported here. */
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/* (Re)build q_cond for the (SS|SS) class. The sqrt of the small-component
 * integrals is rescaled by c1 = 1/(2c)^2 to account for the 1/(2c) factor
 * carried by each small-component density block. */
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spsp1spsp2_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
        const double c1 = .25 / (env[PTR_LIGHT_SPEED] * env[PTR_LIGHT_SPEED]);
        for (int i = 0; i < nbas*nbas; i++) {
                opt->q_cond[i] *= c1;
        }
}
/* (Re)build q_cond for the mixed (SS|LL) class. Two tables are stored
 * back-to-back: the LL bounds first, then the SS bounds (rescaled by
 * 1/(2c)^2). The `intor`/`cintopt` arguments are ignored: both classes
 * need their own specific driver, so the drivers are hard-wired. */
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        const int nn = nbas * nbas;
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = (double *)malloc(sizeof(double) * nn*2);
        set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
        set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nn, ao_loc,
                  atm, natm, bas, nbas, env);
        const double c1 = .25 / (env[PTR_LIGHT_SPEED] * env[PTR_LIGHT_SPEED]);
        double *qcond_ss = opt->q_cond + nn;
        for (int i = 0; i < nn; i++) {
                qcond_ss[i] *= c1;
        }
}
/* For every shell pair (ish,jsh), record the largest symmetrized density
 * magnitude .5*(|D[i,j]|+|D[j,i]|) per density matrix in dmscond, and the
 * maximum over all nset matrices in dmcond. Both tables are written
 * symmetrically. direct_scf_cutoff, atm, natm, bas and env are unused; they
 * are kept to match the sibling setters' call shape. */
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
                       double direct_scf_cutoff, int nset, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nao = ao_loc[nbas];
        for (int ish = 0; ish < nbas; ish++) {
        for (int jsh = 0; jsh <= ish; jsh++) {
                double dmax = 0;
                for (int iset = 0; iset < nset; iset++) {
                        const double complex *pdm = dm + nao*nao*iset;
                        double dmaxi = 0;
                        for (int i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (int j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                double t = .5 * (cabs(pdm[i*nao+j]) + cabs(pdm[j*nao+i]));
                                if (t > dmaxi) {
                                        dmaxi = t;
                                }
                        } }
                        dmscond[iset*nbas*nbas + ish*nbas + jsh] = dmaxi;
                        dmscond[iset*nbas*nbas + jsh*nbas + ish] = dmaxi;
                        if (dmaxi > dmax) {
                                dmax = dmaxi;
                        }
                }
                dmcond[ish*nbas+jsh] = dmax;
                dmcond[jsh*nbas+ish] = dmax;
        } }
}
// dm_cond ~ 1+nset, dm_cond + dms_cond
/* Build the density screening tables for the (LL|LL) class.
 * Layout of opt->dm_cond: [global dmcond | dmscond(0) ... dmscond(nset-1)],
 * each block nbas*nbas doubles. The buffer is rebuilt on every call because
 * nset may change between calls. */
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        free(opt->dm_cond);  /* free(NULL) is a no-op */
        /* calloc zero-fills, replacing the former malloc + memset pair */
        opt->dm_cond = (double *)calloc((size_t)nbas*nbas*(1+nset), sizeof(double));
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
/* Build the density screening tables for the (SS|SS) class.
 * Identical layout to the LL case: one global dmcond block followed by
 * one per-density block, each nbas*nbas doubles. */
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        free(opt->dm_cond);  /* free(NULL) is a no-op */
        /* calloc zero-fills, replacing the former malloc + memset pair */
        opt->dm_cond = (double *)calloc((size_t)nbas*nbas*(1+nset), sizeof(double));
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
/* Build density screening tables for the mixed (SS|LL) class.
 * dm must hold 3*nset matrices in the order (dmll, dmss, dmsl) — the same
 * order produced by _call_veff_ssll in dhf.py. opt->dm_cond layout:
 * 4 global blocks (slots LL,SS,SL,LS; LS currently unused) followed by
 * 4 groups of nset per-density blocks, each nbas*nbas doubles. */
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { // NOT reused: nset may differ between calls
                free(opt->dm_cond);
        }
        if (nset < 3) {
                fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
                        "required to set rkb prescreening\n");
                exit(1);
        }
        nset = nset / 3;   // nset now counts densities per block type
        opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
        // 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
        int n2c = CINTtot_cgto_spinor(bas, nbas);   // spinor AO dimension
        double *dmcondll = opt->dm_cond + nbas*nbas*LL;
        double *dmcondss = opt->dm_cond + nbas*nbas*SS;
        double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
        //double *dmcondls = opt->dm_cond + nbas*nbas*LS;
        double *pdmscond = opt->dm_cond + nbas*nbas*4;
        double *dmscondll = pdmscond + nset*nbas*nbas*LL;
        double *dmscondss = pdmscond + nset*nbas*nbas*SS;
        double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
        //double *dmscondls = dmscond + nset*nbas*nbas*LS;
        double complex *dmll = dm + n2c*n2c*LL*nset;
        double complex *dmss = dm + n2c*n2c*SS*nset;
        double complex *dmsl = dm + n2c*n2c*SL*nset;
        //double complex *dmls = dm + n2c*n2c*LS*nset;
        set_dmcond(dmcondll, dmscondll, dmll,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondss, dmscondss, dmss,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondsl, dmscondsl, dmsl,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
GB_unaryop__ainv_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint32
// op(A') function: GB_tran__ainv_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -Ax [p], computed in uint32_t (additive inverse wraps mod 2^32).
GrB_Info GB_unop__ainv_uint32_uint32
(
    uint32_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        uint32_t aij = Ax [k] ;
        Cx [k] = (uint32_t) (- aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ainv (A'): transpose A, typecast, and negate each entry.
// The transpose loop itself is the shared template GB_unaryop_transpose.c,
// instantiated here via the GB_* macros defined above for this type pair.
GrB_Info GB_tran__ainv_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts (phase-2 input)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
calculate_embedded_nodal_variable_from_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Ruben Zorrilla
//
//
#if !defined(KRATOS_CALCULATE_EMBEDDED_VARIABLE_FROM_SKIN_PROCESS_INCLUDED )
#define KRATOS_CALCULATE_EMBEDDED_VARIABLE_FROM_SKIN_PROCESS_INCLUDED
// System includes
// External includes
// Project includes
#include "containers/model.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "includes/kratos_flags.h"
#include "factories/linear_solver_factory.h"
#include "elements/embedded_nodal_variable_calculation_element_simplex.h"
#include "processes/process.h"
#include "processes/find_intersected_geometrical_objects_process.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/intersection_utilities.h"
#include "utilities/variable_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
// Traits-style helper that maps the embedded-variable type (double or
// array_1d<double,3>) to the auxiliary unknown variable used by the least
// squares problem (NODAL_MAUX / NODAL_VAUX). Only the explicit
// specializations below are ever instantiated.
template< class TVarType >
class EmbeddedNodalVariableFromSkinTypeHelperClass
{
public:
    ///@name Type Definitions
    ///@{

    ///@}
    ///@name Pointer Definitions
    /// Pointer definition of EmbeddedNodalVariableFromSkinTypeHelperClass
    KRATOS_CLASS_POINTER_DEFINITION(EmbeddedNodalVariableFromSkinTypeHelperClass);

    ///@}
    ///@name Life Cycle
    ///@{

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Get the Unknown Variable object
     * This method returns a reference to the unknown variable. For double embedded nodal
     * variables this is a reference to NODAL_MAUX. In case of array type embedded nodal
     * variables, it returns a reference to NODAL_VAUX. These are the variables used when
     * solving the embedded nodal values least squares minimization problem.
     * @return const Variable<TVarType>& Reference to the unknown variable
     */
    static inline const Variable<TVarType> &GetUnknownVariable();

    /**
     * @brief Add the unknown variable to a model part
     * This method adds the unknown variable to the model part of interest.
     * @param rModelPart Reference to the model part to which the variable is added
     */
    static inline void AddUnknownVariable(ModelPart &rModelPart);

    /**
     * @brief Add the unknown variable DOFs to a model part
     * This method adds the unknown variable DOFs to the model part of interest
     * @param rModelPart Reference to the model part to which the variable DOFs are added
     */
    static inline void AddUnknownVariableDofs(ModelPart &rModelPart);

    ///@}
};
// Scalar case: the least squares unknown is NODAL_MAUX.
template <>
inline const Variable<double> &EmbeddedNodalVariableFromSkinTypeHelperClass<double>::GetUnknownVariable()
{
    return KratosComponents<Variable<double>>::Get("NODAL_MAUX");
}

// Vector case: the least squares unknown is NODAL_VAUX.
template <>
inline const Variable<array_1d<double,3>> &EmbeddedNodalVariableFromSkinTypeHelperClass<array_1d<double,3>>::GetUnknownVariable()
{
    return KratosComponents<Variable<array_1d<double, 3>>>::Get("NODAL_VAUX");
}

template <>
inline void EmbeddedNodalVariableFromSkinTypeHelperClass<double>::AddUnknownVariable(ModelPart &rModelPart)
{
    rModelPart.AddNodalSolutionStepVariable(NODAL_MAUX);
}

template <>
inline void EmbeddedNodalVariableFromSkinTypeHelperClass<array_1d<double,3>>::AddUnknownVariable(ModelPart &rModelPart)
{
    rModelPart.AddNodalSolutionStepVariable(NODAL_VAUX);
}

template <>
inline void EmbeddedNodalVariableFromSkinTypeHelperClass<double>::AddUnknownVariableDofs(ModelPart &rModelPart)
{
    VariableUtils().AddDof(NODAL_MAUX, rModelPart);
}

// Vector case: one DOF per component of NODAL_VAUX.
template <>
inline void EmbeddedNodalVariableFromSkinTypeHelperClass<array_1d<double, 3>>::AddUnknownVariableDofs(ModelPart &rModelPart)
{
    VariableUtils().AddDof(NODAL_VAUX_X, rModelPart);
    VariableUtils().AddDof(NODAL_VAUX_Y, rModelPart);
    VariableUtils().AddDof(NODAL_VAUX_Z, rModelPart);
}
template <class TVarType, class TSparseSpace, class TDenseSpace, class TLinearSolver>
class CalculateEmbeddedNodalVariableFromSkinProcess : public Process
{
public:
///@name Type Definitions
///@{
typedef typename TLinearSolver::Pointer LinearSolverPointerType;
typedef typename Scheme<TSparseSpace,TDenseSpace>::Pointer SchemePointerType;
typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::UniquePointer SolvingStrategyPointerType;
typedef typename FindIntersectedGeometricalObjectsProcess::UniquePointer FindIntersectedGeometricalObjectsProcessPointerType;
typedef std::unordered_set<std::pair<std::size_t, std::size_t>, PairHasher<std::size_t, std::size_t>, PairComparor<std::size_t, std::size_t>> EdgesSetType;
///@}
///@name Pointer Definitions
/// Pointer definition of CalculateEmbeddedNodalVariableFromSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateEmbeddedNodalVariableFromSkinProcess);
///@}
///@name Life Cycle
///@{
/**
* @brief Get the Default Settings object
* This method returns the default parameters for this proces.
* Note that it is required to be static since it is called during
* the construction of the object so no instantation exists yet.
* @return Parameters Default parameters json string
*/
/**
 * @brief Get the Default Settings object
 * This method returns the default parameters for this proces.
 * Note that it is required to be static since it is called during
 * the construction of the object so no instantation exists yet.
 * @return Parameters Default parameters json string
 */
static Parameters GetDefaultSettings()
{
    // Defaults: empty model part / variable names must be overridden by the
    // user; the AMGCL solver block is a sensible general-purpose default.
    Parameters default_settings(R"(
    {
        "base_model_part_name": "",
        "skin_model_part_name": "",
        "skin_variable_name": "",
        "embedded_nodal_variable_name": "",
        "buffer_position": 0,
        "gradient_penalty_coefficient": 0.0,
        "aux_model_part_name": "IntersectedElementsModelPart",
        "linear_solver_settings": {
            "preconditioner_type": "amg",
            "solver_type": "amgcl",
            "smoother_type": "ilu0",
            "krylov_type": "cg",
            "max_iteration": 1000,
            "verbosity": 0,
            "tolerance": 1e-8,
            "scaling": false,
            "block_size": 1,
            "use_block_matrices_if_possible": true
        }
    }
    )");

    return default_settings;
}
/**
* @brief Construct a new Calculate Embedded Nodal Variable From Skin Process object
* Constructor with model and json settings
* @param rModel Model container
* @param rSettings Settings json string
*/
CalculateEmbeddedNodalVariableFromSkinProcess(
    Model &rModel,
    Parameters rSettings)
    : CalculateEmbeddedNodalVariableFromSkinProcess(
        rModel.GetModelPart(rSettings["base_model_part_name"].GetString()),
        rModel.GetModelPart(rSettings["skin_model_part_name"].GetString()),
        // Immediately-invoked lambda: validate the user settings against the
        // defaults before delegating, since members are not yet constructed.
        [] (Parameters x) -> Parameters {x.ValidateAndAssignDefaults(GetDefaultSettings()); return x;} (rSettings))
{
}
/**
* @brief Construct a new Calculate Embedded Nodal Variable From Skin Process object
*
* @param rBaseModelPart Background mesh model part reference
* @param rSkinModelPart Embedded skin model part reference
* @param LinearSolverSettings Linear solver json settings
* @param rSkinVariable Skin variable to take the values from
* @param rEmbeddedNodalVariable Background mesh destination variable
* @param LevelSetType Level set type (continuous or discontinuous)
* @param BufferPosition Position in the buffer to take and save the values
* @param AuxPartName Auxiliary intersections model part name
*/
/**
 * @brief Main constructor: stores references, validates buffer positions and
 * mesh topology (simplex elements only), and creates the linear solver from
 * the provided json settings.
 */
CalculateEmbeddedNodalVariableFromSkinProcess(
    ModelPart &rBaseModelPart,
    ModelPart &rSkinModelPart,
    Parameters LinearSolverSettings,
    const Variable<TVarType> &rSkinVariable,
    const Variable<TVarType> &rEmbeddedNodalVariable,
    const double GradientPenaltyCoefficient = 0.0,
    const unsigned int BufferPosition = 0,
    const std::string AuxPartName = "IntersectedElementsModelPart")
    : Process(),
      mBufferPosition(BufferPosition),
      mAuxModelPartName(AuxPartName),
      mGradientPenaltyCoefficient(GradientPenaltyCoefficient),
      mrBaseModelPart(rBaseModelPart),
      mrSkinModelPart(rSkinModelPart),
      mrSkinVariable(rSkinVariable),
      mrEmbeddedNodalVariable(rEmbeddedNodalVariable)
{
    KRATOS_TRY

    // Check the process settings
    // BUG FIX: error messages previously read "buf ... buffer size" ("buf" -> "but").
    KRATOS_ERROR_IF(!(mBufferPosition < rBaseModelPart.GetBufferSize())) <<
        "Asked for buffer position " << mBufferPosition << " but base model part buffer size is " << rBaseModelPart.GetBufferSize() << std::endl;
    KRATOS_ERROR_IF(!(mBufferPosition < rSkinModelPart.GetBufferSize())) <<
        "Asked for buffer position " << mBufferPosition << " but skin model part buffer size is " << rSkinModelPart.GetBufferSize() << std::endl;

    // Check that there is at least one element and node in the model
    int n_loc_mesh_nodes = mrBaseModelPart.GetCommunicator().pLocalMesh()->NumberOfNodes();
    int n_loc_mesh_elements = mrBaseModelPart.GetCommunicator().pLocalMesh()->NumberOfElements();
    KRATOS_ERROR_IF(mrBaseModelPart.GetCommunicator().GetDataCommunicator().SumAll(n_loc_mesh_nodes) == 0) << "The base model part has no nodes." << std::endl;
    KRATOS_ERROR_IF(mrBaseModelPart.GetCommunicator().GetDataCommunicator().SumAll(n_loc_mesh_elements) == 0) << "The base model Part has no elements." << std::endl;

    // Check that the base model part is conformed by simplex elements
    // (the auxiliary least squares elements only support simplices)
    const auto &r_aux_geom = (mrBaseModelPart.ElementsBegin())->GetGeometry();
    const unsigned int dim = r_aux_geom.Dimension();
    if(dim == 2){
        KRATOS_ERROR_IF(r_aux_geom.GetGeometryFamily() != GeometryData::Kratos_Triangle) <<
            "In 2D the element type is expected to be a triangle." << std::endl;
    } else if(dim == 3) {
        KRATOS_ERROR_IF(r_aux_geom.GetGeometryFamily() != GeometryData::Kratos_Tetrahedra) <<
            "In 3D the element type is expected to be a tetrahedron" << std::endl;
    } else {
        KRATOS_ERROR << "Wrong geometry Dimension(). Expected 2 or 3 and obtained: " << dim;
    }

    // Construct the linear solver pointer
    LinearSolverFactory<TSparseSpace, TDenseSpace> linear_solver_factory;
    mpLinearSolver = linear_solver_factory.Create(LinearSolverSettings);

    KRATOS_CATCH("")
}
/// Destructor. Removes the auxiliary intersections model part (if it was
/// created by this process) so no stale model part is left in the Model.
~CalculateEmbeddedNodalVariableFromSkinProcess() override
{
    Model& current_model = mrBaseModelPart.GetModel();
    if(current_model.HasModelPart(mAuxModelPartName)) {
        current_model.DeleteModelPart(mAuxModelPartName);
    }
};
///@}
///@name Operators
///@{
/// Function-call operator: convenience alias for Execute().
void operator()()
{
    Execute();
}
///@}
///@name Operations
///@{
/**
 * @brief Access the auxiliary intersected-edges model part.
 * BUG FIX: the identifier had been corrupted to "¤t_model" (the "&curren"
 * prefix of "&current_model" was swallowed as an HTML entity); restored to
 * a valid reference declaration.
 * @return ModelPart& The model part named mAuxModelPartName in the owning Model
 */
ModelPart &GetIntersectedEdgesModelPart() const
{
    Model &current_model = mrBaseModelPart.GetModel();
    return current_model.GetModelPart(mAuxModelPartName);
}
/// Main entry point: builds the intersected-edges problem, solves the least
/// squares minimization, and writes the result to the user-defined variable.
void Execute() override
{
    KRATOS_TRY;

    // Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
    this->GenerateIntersectedEdgesElementsModelPart();

    // Set the linear strategy to solve the regression problem
    this->SetLinearStrategy();

    // Solve the regression problem
    mpSolvingStrategy->Solve();

    // Copy the obtained values from the unknown variable to the user-defined variable
    this->SetObtainedEmbeddedNodalValues();

    KRATOS_CATCH("")
}
/// Empty the auxiliary model part containers and clear the solving strategy
/// internals (system matrices/vectors), leaving the process reusable.
virtual void Clear()
{
    Model& current_model = mrBaseModelPart.GetModel();
    ModelPart& r_intersected_edges_model_part = current_model.GetModelPart( mAuxModelPartName );
    r_intersected_edges_model_part.Nodes().clear();
    r_intersected_edges_model_part.Elements().clear();
    r_intersected_edges_model_part.Conditions().clear();
    mpSolvingStrategy->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
    return "CalculateEmbeddedNodalVariableFromSkinProcess";
}

/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << "CalculateEmbeddedNodalVariableFromSkinProcess";
}

/// Print object's data. Intentionally empty: the process holds no
/// user-inspectable state beyond what PrintInfo reports.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
const unsigned int mBufferPosition;
const std::string mAuxModelPartName;
const double mGradientPenaltyCoefficient;
ModelPart& mrBaseModelPart;
ModelPart& mrSkinModelPart;
const Variable<TVarType> &mrSkinVariable;
const Variable<TVarType> &mrEmbeddedNodalVariable;
LinearSolverPointerType mpLinearSolver = nullptr;
SolvingStrategyPointerType mpSolvingStrategy = nullptr;
FindIntersectedGeometricalObjectsProcessPointerType mpFindIntersectedGeometricalObjectsProcess;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Build (or rebuild) the auxiliary model part holding one element per
/// intersected edge, on which the least squares problem is assembled.
virtual void GenerateIntersectedEdgesElementsModelPart()
{
    KRATOS_TRY

    // Compute element intersections
    this->CalculateIntersections();

    // Drop any auxiliary model part left over from a previous Execute()
    Model& current_model = mrBaseModelPart.GetModel();
    if(current_model.HasModelPart(mAuxModelPartName)) {
        current_model.DeleteModelPart(mAuxModelPartName);
    }

    // Generate the auxiliary model part
    ModelPart& r_int_elems_model_part = current_model.CreateModelPart(mAuxModelPartName);

    r_int_elems_model_part.Nodes().clear();
    r_int_elems_model_part.Elements().clear();
    r_int_elems_model_part.Conditions().clear();

    // No history needed on the auxiliary part; properties id 0 is a placeholder
    r_int_elems_model_part.SetBufferSize(1);
    r_int_elems_model_part.CreateNewProperties(0, 0);

    // Set the gradient penalty coefficient in the auxiliary model part process info
    r_int_elems_model_part.GetProcessInfo()[GRADIENT_PENALTY_COEFFICIENT] = mGradientPenaltyCoefficient;

    // Add the minimization problem auxiliary variables
    this->AddIntersectedElementsVariables(r_int_elems_model_part);

    // Add intersected elements
    this->AddIntersectedElementsModelPartElements(r_int_elems_model_part);

    // Add DOFs to intersected elements model part
    this->AddIntersectedElementsModelPartDOFs(r_int_elems_model_part);

    KRATOS_CATCH("")
}
/// Copies the solved unknown values from the auxiliary model part nodes back to the
/// matching base model part nodes (same node id) into mrEmbeddedNodalVariable.
void SetObtainedEmbeddedNodalValues() const
{
const auto &rUnknownVariable = EmbeddedNodalVariableFromSkinTypeHelperClass<TVarType>::GetUnknownVariable();
const auto &r_int_elems_model_part = (mrBaseModelPart.GetModel()).GetModelPart(mAuxModelPartName);
// Each auxiliary node writes to a different base node, so iterations are independent
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(r_int_elems_model_part.NumberOfNodes()); ++i_node) {
const auto it_node = r_int_elems_model_part.NodesBegin() + i_node;
auto &r_emb_nod_val = (mrBaseModelPart.GetNode(it_node->Id())).FastGetSolutionStepValue(mrEmbeddedNodalVariable, mBufferPosition);
r_emb_nod_val = it_node->FastGetSolutionStepValue(rUnknownVariable);
}
}
/// Adds the minimization problem unknown variable to the model part nodal database.
inline void AddIntersectedElementsVariables(ModelPart &rModelPart) const
{
EmbeddedNodalVariableFromSkinTypeHelperClass<TVarType>::AddUnknownVariable(rModelPart);
}
/// Adds the unknown variable DOFs in the given model part.
void AddIntersectedElementsModelPartDOFs(ModelPart &rModelPart) const
{
EmbeddedNodalVariableFromSkinTypeHelperClass<TVarType>::AddUnknownVariableDofs(rModelPart);
}
/// Populates rModelPart with one EmbeddedNodalVariableCalculationElementSimplex per
/// intersected, not-yet-stored edge of the base model part. Each new element stores the
/// average normalized intersection distance (DISTANCE) and the average interpolated skin
/// variable value (unknown variable) for that edge. Parent elements of intersected edges
/// are flagged INTERFACE; nodes added to rModelPart are flagged VISITED.
void AddIntersectedElementsModelPartElements(ModelPart &rModelPart) const
{
// Initialize the VISITED flag in the origin model part
// It will be used to mark the nodes already added to the intersected elements model part
VariableUtils().SetFlag(VISITED, false, mrBaseModelPart.Nodes());
// Initialize the INTERFACE flag in the origin model part
// It will be used to mark the elements that have any intersection with the skin model part
VariableUtils().SetFlag(INTERFACE, false, mrBaseModelPart.Elements());
// Create element edges map
EdgesSetType edges_set;
// Get the base model part intersections
auto &r_int_obj_vect = mpFindIntersectedGeometricalObjectsProcess->GetIntersections();
// Get the unknown variable from Kratos components
const auto &rUnknownVariable = EmbeddedNodalVariableFromSkinTypeHelperClass<TVarType>::GetUnknownVariable();
// Loop the base model part elements
std::size_t new_elem_id = 1;
for (unsigned int i_elem = 0; i_elem < mrBaseModelPart.NumberOfElements(); ++i_elem) {
auto it_elem = mrBaseModelPart.ElementsBegin() + i_elem;
// Check if the current element has intersections
if (r_int_obj_vect[i_elem].size() != 0) {
// Initialize the element values
auto &r_geom = it_elem->GetGeometry();
const auto edges = r_geom.GenerateEdges();
// Loop the edges
for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) {
// Check if the current edge is already stored
auto &r_i_edge_geom = edges[i_edge];
auto i_edge_pair = this->SetEdgePair(r_i_edge_geom);
if (edges_set.find(i_edge_pair) == edges_set.end()) {
// Initialize edge values
double i_edge_d = 0.0; // Average normalized distance from lower id. node
unsigned int n_int_obj = 0; // Number edge of intersecting entities
TVarType i_edge_val = mrEmbeddedNodalVariable.Zero(); // Average edge variable value
// Check the edge intersection against all the candidates
for (auto &r_int_obj : r_int_obj_vect[i_elem]) {
Point intersection_point;
const bool is_intersected = this->ComputeEdgeIntersection(
r_int_obj.GetGeometry(),
r_i_edge_geom[0],
r_i_edge_geom[1],
intersection_point);
// Compute the variable value in the intersection point
if (is_intersected) {
n_int_obj++;
Vector int_obj_N;
array_1d<double,3> local_coords;
// Interpolate the skin variable at the intersection point using the shape functions
r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point);
r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords);
for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) {
i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(mrSkinVariable, mBufferPosition) * int_obj_N[i_node];
}
i_edge_d += norm_2(intersection_point - r_i_edge_geom[0]) / r_i_edge_geom.Length();
}
}
// Check if the edge is intersected
if (n_int_obj != 0) {
// Flag the edge parent element if the edge is intersected by any entity
it_elem->Set(INTERFACE, true);
// Add the average edge value (there might exist cases in where
// more than one geometry intersects the edge of interest).
i_edge_d /= n_int_obj;
i_edge_val /= n_int_obj;
// If not added yet, add the edge nodes
this->AddEdgeNodes(r_i_edge_geom, rModelPart);
// Create a new element with the intersected edge geometry and fake properties
auto p_element = Kratos::make_intrusive<EmbeddedNodalVariableCalculationElementSimplex<TVarType>>(
new_elem_id,
this->pSetEdgeElementGeometry(rModelPart, r_i_edge_geom, i_edge_pair),
rModelPart.pGetProperties(0));
// Save the edge values in the new element
p_element->SetValue(DISTANCE, i_edge_d);
p_element->SetValue(rUnknownVariable, i_edge_val);
// Update the id. counter
new_elem_id++;
// Add the new edge element to the hash map
edges_set.insert(i_edge_pair);
// Add the new edge element to the intersected elements model part
rModelPart.Elements().push_back(p_element);
}
}
}
}
}
}
void SetLinearStrategy()
{
// Create the linear strategy
SchemePointerType p_scheme = Kratos::make_shared<ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>>();
bool calculate_norm_dx = false;
bool calculate_reactions = false;
bool reform_dof_at_each_iteration = false;
BuilderSolverPointerType p_builder_and_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>>(mpLinearSolver);
Model ¤t_model = mrBaseModelPart.GetModel();
ModelPart &r_aux_model_part = current_model.GetModelPart(mAuxModelPartName);
mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>>(
r_aux_model_part,
p_scheme,
mpLinearSolver,
p_builder_and_solver,
calculate_reactions,
reform_dof_at_each_iteration,
calculate_norm_dx);
mpSolvingStrategy->Check();
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
/**
* @brief Construct a new Calculate Embedded Nodal Variable From Skin Process object
* Constructor with background and skin model parts as well as json settings. This
* constructor is intentionally protected to avoid exposing it to the user since it
* is intended to serve as an auxiliary constructor to bridge from the model and
* parameters one, which checks the provided settings with the defaults, to the "old
* fashioned" one. This allows keeping the member variables as const as well as to
* have a unique implementation of the constructor required checks and operations.
* @param rBaseModelPart Background mesh model part reference
* @param rSkinModelPart Embedded skin model part reference
* @param rSettings Settings json string
*/
CalculateEmbeddedNodalVariableFromSkinProcess(
ModelPart &rBaseModelPart,
ModelPart &rSkinModelPart,
Parameters rSettings)
: CalculateEmbeddedNodalVariableFromSkinProcess(
rBaseModelPart,
rSkinModelPart,
rSettings["linear_solver_settings"],
// Resolve the variables from their registered names in the Kratos components
KratosComponents<Variable<TVarType>>::Get(rSettings["skin_variable_name"].GetString()),
KratosComponents<Variable<TVarType>>::Get(rSettings["embedded_nodal_variable_name"].GetString()),
rSettings["gradient_penalty_coefficient"].GetDouble(),
rSettings["buffer_position"].GetInt(),
rSettings["aux_model_part_name"].GetString())
{
// Delegating constructor: all checks and member set-up happen in the target constructor
}
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// (Re)creates the find-intersections process and computes the base/skin intersections.
void CalculateIntersections()
{
mpFindIntersectedGeometricalObjectsProcess = Kratos::make_unique<FindIntersectedGeometricalObjectsProcess>(mrBaseModelPart, mrSkinModelPart);
mpFindIntersectedGeometricalObjectsProcess->Initialize();
mpFindIntersectedGeometricalObjectsProcess->FindIntersections();
}
/// Clears the intersections data. Requires CalculateIntersections() to have run before.
void ClearIntersections()
{
mpFindIntersectedGeometricalObjectsProcess->Clear();
}
/**
 * @brief Computes the intersection between a skin entity geometry and an edge.
 * In 2D a line-line intersection is performed, in 3D a triangle-line one.
 * @param rIntObjGeometry Intersecting (skin) entity geometry
 * @param rEdgePoint1 First edge node
 * @param rEdgePoint2 Second edge node
 * @param rIntersectionPoint Output intersection point coordinates
 * @return true if the edge is intersected, false otherwise
 */
bool ComputeEdgeIntersection(
    const Element::GeometryType& rIntObjGeometry,
    const Element::NodeType& rEdgePoint1,
    const Element::NodeType& rEdgePoint2,
    Point& rIntersectionPoint) const
{
    const unsigned int work_dim = rIntObjGeometry.WorkingSpaceDimension();
    switch (work_dim) {
        case 2: {
            const unsigned int status = IntersectionUtilities::ComputeLineLineIntersection<Element::GeometryType>(
                rIntObjGeometry, rEdgePoint1.Coordinates(), rEdgePoint2.Coordinates(), rIntersectionPoint.Coordinates());
            // Status 1 and 3 are the intersecting cases in 2D
            return status == 1 || status == 3;
        }
        case 3: {
            const unsigned int status = IntersectionUtilities::ComputeTriangleLineIntersection<Element::GeometryType>(
                rIntObjGeometry, rEdgePoint1.Coordinates(), rEdgePoint2.Coordinates(), rIntersectionPoint.Coordinates());
            return status == 1;
        }
        default:
            KRATOS_ERROR << "Working space dimension value equal to " << work_dim << ". Check your skin geometry implementation." << std::endl;
    }
    return false; // Unreachable (KRATOS_ERROR throws)
}
void AddEdgeNodes(
const Geometry<Node<3>> &rEdgeGeometry,
ModelPart &rModelPart) const
{
// Loop the edge nodes
for (std::size_t i = 0; i < 2; ++i) {
auto p_i_node = rEdgeGeometry(i);
// Check if the node has been already added
if (!p_i_node->Is(VISITED)) {
p_i_node->Set(VISITED, true);
rModelPart.CreateNewNode(p_i_node->Id(), *p_i_node);
}
}
}
/**
 * @brief Creates a new geometry of the same type as the given edge, pointing to the
 * model part nodes whose ids are stored in NewEdgeIds (min id first, max id second).
 */
Element::GeometryType::Pointer pSetEdgeElementGeometry(
    ModelPart &rModelPart,
    const Element::GeometryType &rCurrentEdgeGeometry,
    const std::pair<std::size_t, std::size_t> NewEdgeIds) const
{
    Element::GeometryType::PointsArrayType edge_points;
    edge_points.push_back(rModelPart.pGetNode(NewEdgeIds.first));
    edge_points.push_back(rModelPart.pGetNode(NewEdgeIds.second));
    return rCurrentEdgeGeometry.Create(edge_points);
}
/// Returns the ordered (min id, max id) node-id pair identifying an edge.
inline std::pair<std::size_t, std::size_t> SetEdgePair(const Geometry<Node<3>> &rEdgeGeom) const
{
    const std::size_t id_a = rEdgeGeom[0].Id();
    const std::size_t id_b = rEdgeGeom[1].Id();
    if (id_a < id_b) {
        return std::make_pair(id_a, id_b);
    }
    return std::make_pair(id_b, id_a);
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CalculateEmbeddedNodalVariableFromSkinProcess& operator=(CalculateEmbeddedNodalVariableFromSkinProcess const& rOther) = delete;
/// Copy constructor.
CalculateEmbeddedNodalVariableFromSkinProcess(CalculateEmbeddedNodalVariableFromSkinProcess const& rOther) = delete;
///@}
}; // Class CalculateEmbeddedNodalVariableFromSkinProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template< class TVarType, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (
std::istream& rIStream,
CalculateEmbeddedNodalVariableFromSkinProcess<TVarType, TSparseSpace, TDenseSpace, TLinearSolver>& rThis);
/// output stream function
template< class TVarType, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (
std::ostream& rOStream,
const CalculateEmbeddedNodalVariableFromSkinProcess<TVarType, TSparseSpace, TDenseSpace, TLinearSolver>& rThis)
{
// Delegate to the process print methods: info line first, then the data
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_CALCULATE_EMBEDDED_VARIABLE_FROM_SKIN_PROCESS_INCLUDED defined
|
drupal7_fmt_plug.c | /*
* Drupal 7 phpass variant using SHA-512 and hashes cut at 258 bits.
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* These are 8 byte salted hashes with a loop count that defines the number
* of loops to compute. Drupal uses 258 bits of the hash, this is a multiple of
* 6 but not 8. I presume this is for getting unpadded base64. Anyway we store
* an extra byte but for now we will only compare 256 bits. I doubt that will
* pose any problems. Actually I'm not quite sure the last bits end up correct
* from the current version of get_binary().
*
* Based on [old thick] phpass-md5.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_drupal7;
#elif FMT_REGISTERS_H
john_register_one(&fmt_drupal7);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Drupal7"
#define FORMAT_NAME "$S$"
#define FORMAT_TAG "$S$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (x16385)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 47
#define CIPHERTEXT_LENGTH 55
#define DIGEST_SIZE (512/8)
#define BINARY_SIZE (258/8) // ((258+7)/8)
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"$S$CwkjgAKeSx2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X", "virtualabc"},
{"$S$CFURCPa.k6FAEbJPgejaW4nijv7rYgGc4dUJtChQtV4KLJTPTC/u", "password"},
{"$S$C6x2r.aW5Nkg7st6/u.IKWjTerHXscjPtu4spwhCVZlP89UKcbb/", "NEW_TEMP_PASSWORD"},
{NULL}
};
/*
* NOTE, due to the 0x4000 iteration count, I am not wasting time pre-loading
* keys/salts. We will simply add SIMD code to the crypt_all. We could only
* gain < .1% worrying about all the extra stuff from set_key, get_key, the
* hashes, etc needed to split out SIMD. We just keep all input data in 'flat'
* format, switch to SIMD, do the 0x4000 loops, and put output back into 'flat'
* layout again. So we have no 'static' SIMD objects.
*/
static unsigned char *cursalt;
static unsigned loopCnt;
static unsigned char (*EncKey)[PLAINTEXT_LENGTH + 1];
static unsigned int *EncKeyLen;
static char (*crypt_key)[DIGEST_SIZE];
/* Allocate per-candidate key, key-length and digest buffers. With OpenMP the
 * candidate count is scaled by the thread count times OMP_SCALE. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
EncKey = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*EncKey));
EncKeyLen = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*EncKeyLen));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(EncKeyLen);
MEM_FREE(EncKey);
}
/* Validate a Drupal 7 "$S$" hash: exact length, tag match, all characters
 * after the tag in the crypt64 alphabet, and a sane log2 iteration count. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned count_log2;
	int i = FORMAT_TAG_LEN;

	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	/* Every character after the tag must map to a valid base-64 value */
	while (i < CIPHERTEXT_LENGTH) {
		if (atoi64[ARCH_INDEX(ciphertext[i++])] == 0x7F)
			return 0;
	}
	/* The first character after the tag encodes log2 of the loop count */
	count_log2 = atoi64[ARCH_INDEX(ciphertext[3])];
	return count_log2 >= 7 && count_log2 <= 31;
}
/* Salt layout: 8 raw salt bytes followed by one crypt64 character that
 * encodes log2 of the iteration count (see get_salt()). */
static void set_salt(void *salt)
{
loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
cursalt = salt;
}
static void set_key(char *key, int index)
{
int len;
len = strlen(key);
EncKeyLen[index] = len;
memcpy(((char*)EncKey[index]), key, len + 1);
}
/* Return the NUL-terminated candidate stored by set_key(). */
static char *get_key(int index)
{
return (char*)EncKey[index];
}
/* Fast screening pass: compare only the first machine word (ARCH_SIZE bytes)
 * of each computed digest; a full BINARY_SIZE compare is done in cmp_one(). */
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full comparison against the stored (truncated to BINARY_SIZE) binary. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* No further verification is performed beyond cmp_one(). */
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Drupal 7 phpass-SHA512 kernel: start from SHA512(salt . password), then
 * repeatedly compute SHA512(previous_digest . password) loopCnt times in
 * total, storing the final digest in crypt_key[]. The SIMD path keeps the
 * 128-byte SHA-512 blocks in interleaved ("mixed") lane format and only
 * flattens the output on the very last call.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
#ifdef SIMD_COEF_64
unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
uint64_t *keys64;
unsigned i, j, len, Lcount = loopCnt;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys64 = (uint64_t*)keys;
memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
/* First block: salt (8 bytes) . password, with 0x80 padding marker and
 * the SHA-512 message bit length in the last lane word. */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < 8; ++j)
keys[GETPOS(j, i)] = cursalt[j];
for (j = 0; j < len; ++j)
keys[GETPOS(j+8, i)] = EncKey[index+i][j];
keys[GETPOS(j+8, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3;
}
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
/* Re-lay the buffers as digest (64 bytes, written in place by the call
 * above) . password for the iterated loop. */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < len; ++j)
keys[GETPOS(j+64, i)] = EncKey[index+i][j];
keys[GETPOS(j+64, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3;
}
while (--Lcount)
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Last one with FLAT_OUT
SIMDSHA512body(keys, (uint64_t*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA512_CTX ctx;
unsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH];
int len = EncKeyLen[index];
unsigned Lcount = loopCnt - 1;
/* Initial digest: SHA512(salt . password); the password is kept right
 * after the digest in tmp so each loop hashes digest . password. */
SHA512_Init( &ctx );
SHA512_Update( &ctx, cursalt, 8 );
SHA512_Update( &ctx, EncKey[index], len );
memcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len);
SHA512_Final( tmp, &ctx);
len += DIGEST_SIZE;
do {
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( tmp, &ctx);
} while (--Lcount);
/* Final round written straight into the output digest buffer */
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( (unsigned char *) crypt_key[index], &ctx);
#endif
}
return count;
}
/*
 * Decode the crypt64 hash portion (everything after the "$S$", the count
 * character and the 8 salt characters) into binary, phpass-style: each
 * group of four base-64 characters packs into three output bytes, least
 * significant 6-bit group first. Ten full groups plus a 3-character tail
 * yield the 258 stored bits (see BINARY_SIZE note at the top of the file).
 */
static void * get_binary(char *ciphertext)
{
int i;
unsigned sixbits;
static union {
unsigned char u8[BINARY_SIZE + 1];
uint32_t u32;
} out;
int bidx=0;
char *pos;
pos = &ciphertext[FORMAT_TAG_LEN + 1 + 8];
for (i = 0; i < 10; ++i) {
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
sixbits >>= 4;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<2);
}
/* Tail: the remaining characters do not fill a whole 4-char group */
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
return out.u8;
}
/* Build the 9-byte salt: the 8 raw salt characters plus the loop-count
 * character (stored last so set_salt()/iteration_count() can decode it). */
static void * get_salt(char *ciphertext)
{
static union {
unsigned char u8[SALT_SIZE + 1];
uint32_t u32;
} salt;
// store off the 'real' 8 bytes of salt
memcpy(salt.u8, &ciphertext[FORMAT_TAG_LEN+1], 8);
// append the 1 byte of loop count information.
salt.u8[8] = ciphertext[FORMAT_TAG_LEN];
return salt.u8;
}
/* Standard JtR hash-table lookups on the first 32 bits of the digest. */
static int get_hash_0(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_0; }
static int get_hash_1(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_1; }
static int get_hash_2(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_2; }
static int get_hash_3(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_3; }
static int get_hash_4(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_4; }
static int get_hash_5(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_5; }
static int get_hash_6(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_6; }
/* Bucket the salt by its first 32 bits into a 10-bit hash. */
static int salt_hash(void *salt)
{
return *((uint32_t *)salt) & 0x3FF;
}
/* Tunable-cost report: iteration count decoded from the salt's 9th byte. */
static unsigned int iteration_count(void *salt)
{
return (unsigned int) 1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]);
}
/* Format descriptor wiring the functions above into the JtR core. */
struct fmt_main fmt_drupal7 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
// true salt is SALT_SIZE but we add the loop count
SALT_SIZE + 1,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_iso_expand.c | //------------------------------------------------------------------------------
// GB_iso_expand: expand a scalar into an entire array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
#include "GB_is_nonzero.h"
void GB_iso_expand // expand an iso scalar into an entire array
(
void *restrict X, // output array to expand into (must hold n*size bytes)
int64_t n, // # of entries in X
void *restrict scalar, // scalar to expand into X
size_t size, // size of the scalar and each entry of X
GB_Context Context
)
{
//--------------------------------------------------------------------------
// determine how many threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// copy the scalar into X
//--------------------------------------------------------------------------
// Dispatch on the entry size so the common fixed-size cases use direct
// assignment instead of a per-entry memcpy.
if (GB_is_nonzero (scalar, size))
{
//----------------------------------------------------------------------
// the scalar is nonzero
//----------------------------------------------------------------------
int64_t p ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
switch (size)
{
case GB_1BYTE : // bool, uint8, int8, and UDT of size 1
{
uint8_t a0 = (*((uint8_t *) scalar)) ;
uint8_t *restrict Z = (uint8_t *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
Z [p] = a0 ;
}
}
break ;
case GB_2BYTE : // uint16, int16, and UDT of size 2
{
uint16_t a0 = (*((uint16_t *) scalar)) ;
uint16_t *restrict Z = (uint16_t *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
Z [p] = a0 ;
}
}
break ;
case GB_4BYTE : // uint32, int32, float, and UDT of size 4
{
uint32_t a0 = (*((uint32_t *) scalar)) ;
uint32_t *restrict Z = (uint32_t *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
Z [p] = a0 ;
}
}
break ;
case GB_8BYTE : // uint64, int64, double, float complex, UDT size 8
{
uint64_t a0 = (*((uint64_t *) scalar)) ;
uint64_t *restrict Z = (uint64_t *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
Z [p] = a0 ;
}
}
break ;
case GB_16BYTE : // double complex, and UDT size 16
{
GB_blob16 a0 = (*((GB_blob16 *) scalar)) ;
GB_blob16 *restrict Z = (GB_blob16 *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
Z [p] = a0 ;
}
}
break ;
default : // user-defined types of arbitrary size
{
GB_void *restrict Z = (GB_void *) X ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < n ; p++)
{
memcpy (Z + p*size, scalar, size) ;
}
}
break ;
}
}
else
{
//----------------------------------------------------------------------
// the scalar is zero: use memset
//----------------------------------------------------------------------
GB_memset (X, 0, n*size, nthreads_max) ;
}
}
|
mkldnn_quantize_v2-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_quantize_v2-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantize_v2-inl.h"
namespace mxnet {
namespace op {
/* Stateful quantize operator. Caches the computed input data range and the
 * oneDNN reorder primitive so consecutive forwards with an unchanged range
 * reuse them (see the initalized_ handling in Forward()). */
class SgMKLDNNQuantizeOperator {
public:
explicit SgMKLDNNQuantizeOperator(const nnvm::NodeAttrs& attrs)
: param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}
void Forward(const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
private:
bool initalized_{false}; // true while the cached reorder matches cached_data_min_/max_ (spelling kept as-is; Forward() uses it)
QuantizeV2Param param_; // parsed QuantizeV2 attributes (calib range, output type)
float cached_data_min_{0.f}; // data range the cached reorder was built for
float cached_data_max_{0.f};
mkldnn::memory::desc o_desc_; // cached output memory descriptor
mkldnn_args_map_t args_; // argument map passed to the reorder primitive
std::shared_ptr<mkldnn::reorder> fwd_pd_; // cached reorder primitive
};
/* Quantize inputs[0] into outputs[0] via a oneDNN reorder, writing the float
 * min/max of the represented range into outputs[1]/outputs[2].
 * Already-quantized inputs (int8/uint8) are passed through unchanged.
 * Without calibration info the data range is scanned from the input and the
 * reorder primitive is rebuilt whenever the range changes. */
void SgMKLDNNQuantizeOperator::Forward(const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
float quantized_range = 0.0;
NDArray in_buffer = inputs[0];
float data_min = mshadow::red::limits::MaxValue<float>();
float data_max = mshadow::red::limits::MinValue<float>();
// Pass through quantized data
if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
*outputs[1].data().dptr<float>() = param_.min_calib_range.value();
*outputs[2].data().dptr<float>() = param_.max_calib_range.value();
} else {
// No calibration info: report the full representable range of the dtype
if (inputs[0].dtype() == mshadow::kUint8) {
*outputs[1].data().dptr<float>() = 0;
*outputs[2].data().dptr<float>() = kUint8Range;
} else {
*outputs[1].data().dptr<float>() = -kInt8Range;
*outputs[2].data().dptr<float>() = kInt8Range;
}
}
// Copy only when the output is not aliased to the input
if (req[0] != kWriteInplace) {
const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetMKLDNNData());
MKLDNNStream::Get()->Submit();
}
} else {
if (in_buffer.IsView() && in_buffer.IsMKLDNNData())
in_buffer = inputs[0].Reorder2Default();
auto i_mem = in_buffer.GetMKLDNNData();
if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
data_min = param_.min_calib_range.value();
data_max = param_.max_calib_range.value();
} else {
// no calib info: scan the input for its min/max, one partial result per thread
in_buffer = inputs[0].Reorder2Default();
auto in_ptr = in_buffer.data().dptr<float>();
auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
std::vector<float> data_maxs(nthreads, data_max);
std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
int tid = omp_get_thread_num();
if (in_ptr[i] > data_maxs[tid])
data_maxs[tid] = in_ptr[i];
if (in_ptr[i] < data_mins[tid])
data_mins[tid] = in_ptr[i];
}
// Reduce the per-thread partial min/max values
for (index_t i = 0; i < nthreads; i++) {
if (data_maxs[i] > data_max)
data_max = data_maxs[i];
if (data_mins[i] < data_min)
data_min = data_mins[i];
}
// A changed data range invalidates the cached reorder primitive
if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
initalized_ = false;
}
// Write output min/max
auto out_type = GetQuantizeOutputType(param_);
if (out_type == mshadow::kUint8) {
quantized_range = kUint8Range;
*outputs[1].data().dptr<float>() = data_min;
*outputs[2].data().dptr<float>() = data_max;
} else if (out_type == mshadow::kInt8) {
// int8 output uses a symmetric range around zero
float real_range = MaxAbs(data_min, data_max);
quantized_range = kInt8Range;
*outputs[1].data().dptr<float>() = -real_range;
*outputs[2].data().dptr<float>() = real_range;
} else {
LOG(FATAL) << "mkldnn quantize op only supports int8 and uint8 as output type";
}
// (Re)build the reorder primitive with the output scale for this range
if (!initalized_) {
cached_data_min_ = data_min;
cached_data_max_ = data_max;
float real_range = MaxAbs(data_min, data_max);
float scale = quantized_range / real_range;
mkldnn::primitive_attr attr;
const int mask = 0;
std::vector<float> scales = {scale};
attr.set_output_scales(mask, scales);
mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
auto i_desc = i_mem->get_desc();
size_t i_ndim = in_buffer.shape().ndim();
if (i_ndim == 4) {
// 4-D input: emit the quantized output in nhwc layout
mkldnn::memory::format_tag o_fmt = mkldnn::memory::format_tag::nhwc;
mkldnn::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims);
o_desc_ = mkldnn::memory::desc(o_dims, get_mkldnn_type(out_type), o_fmt);
} else {
// Other ranks keep the input layout, only the data type changes
o_desc_ = i_desc;
o_desc_.data.data_type = get_mkldnn_type_t(out_type);
}
auto reorder_pd =
mkldnn::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr);
fwd_pd_ = std::make_shared<mkldnn::reorder>(reorder_pd);
initalized_ = true;
}
auto o_mem = CreateMKLDNNMem(outputs[0], o_desc_, req[0]);
args_[MKLDNN_ARG_FROM] = *i_mem;
args_[MKLDNN_ARG_TO] = *o_mem.second;
MKLDNNStream::Get()->RegisterPrimArgs(*fwd_pd_, args_);
CommitOutput(outputs[0], o_mem);
MKLDNNStream::Get()->Submit();
}
}
/* Stateless entry point: fetch the cached operator from the op state and
 * dispatch the forward call to it. */
static void SgMKLDNNQuantizeForward(const OpStatePtr& state_ptr,
                                    const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
  state_ptr.get_state<SgMKLDNNQuantizeOperator>().Forward(ctx, inputs, req, outputs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e, semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
/*
  Copy (or, for AssignChannelOp, assign a constant to) a single pixel channel,
  row by row, from source_image into destination_image.  Rows and columns are
  clipped to the smaller of the two images.  Returns MagickFalse if any row
  could not be read or synced.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *dst_view,
    *src_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  src_view=AcquireVirtualCacheView(source_image,exception);
  dst_view=AcquireAuthenticCacheView(destination_image,exception);
  rows=MagickMin(source_image->rows,destination_image->rows);
  columns=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p;

    PixelTrait
      dst_traits,
      src_traits;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(src_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(dst_view,0,y,destination_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    dst_traits=GetPixelChannelTraits(destination_image,destination_channel);
    src_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((dst_traits == UndefinedPixelTrait) ||
        (src_traits == UndefinedPixelTrait))
      continue;  /* nothing to do for this channel pair; row left untouched */
    for (x=0; x < (ssize_t) columns; x++)
    {
      Quantum
        value;

      /* AssignChannelOp writes the supplied constant; the other ops read the
         source channel value. */
      value=(channel_op == AssignChannelOp) ? pixel :
        GetPixelChannel(source_image,source_channel,p);
      SetPixelChannel(destination_image,destination_channel,value,q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(dst_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  dst_view=DestroyCacheView(dst_view);
  src_view=DestroyCacheView(src_view);
  return(status);
}
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  /*
    ChannelFxImage() parses the channel expression and applies each channel
    operation in turn; the caller owns (and must destroy) the returned image
    list.  On any failure the partially-built list is destroyed here and NULL
    is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      /*
        Bug fix: the clone was previously only looked up with
        GetLastImageInList() before returning NULL, leaking the image list;
        destroy it instead.
      */
      destination_image=DestroyImageList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* Channel separator: continue writing channels to the same image. */
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* Advance to the next source image (wrap to the first at the end). */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        Image
          *canvas;

        /* Finish the current output image and start a new one. */
        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            /*
              Bug fix: destroy the whole list on failure instead of leaking
              it (previously only GetLastImageInList() was called here).
            */
            destination_image=DestroyImageList(destination_image);
            return((Image *) NULL);
          }
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        /* For AssignChannelOp, i still holds the channel parsed before the
           '=' sign, so the constant is written to that channel. */
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case CompositeMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | CompositeMaskChannel);
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | ReadMaskChannel);
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | WriteMaskChannel);
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        if (((channels >= 1)  || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* Exchange needs the reverse copy as well. */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  /*
    Bug fix: a ChannelImage() failure above destroys the list and leaves
    destination_image NULL; guard before dereferencing it below.
  */
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Combine the grayscale intensity of each image in the list into successive
    channels of a single new image.  The caller owns the returned image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /*
    Enable alpha when more images are supplied than the colorspace has color
    channels (e.g. a 4th image for sRGB becomes alpha).
  */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    /* Map the i-th defined channel of the result to the i-th image. */
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          /*
            Bug fix: this error path previously skipped DestroyCacheView(),
            leaking the cache view for every failed row.
          */
          image_view=DestroyCacheView(image_view);
          continue;
        }
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelIntensity(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): this loop is serial; the atomic looks vestigial from
           a parallel variant, but it is harmless so it is kept. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CombineImageTag,progress,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report whether the image has an active alpha channel (any alpha_trait
  other than UndefinedPixelTrait).
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
/* Test whether channel `bit` is set in the channel_type bit mask. */
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
CacheView
*image_view,
*separate_view;
Image
*separate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize separate image attributes.  The result is a clone of the input
converted to a grayscale image with alpha dropped; the caller owns it.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
{
separate_image=DestroyImage(separate_image);
return((Image *) NULL);
}
separate_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(separate_image,GRAYColorspace,exception);
separate_image->gamma=image->gamma;
/*
Separate image: copy the selected channel's values into the gray channel,
one row at a time (parallel over rows when OpenMP is enabled).
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
/* Default the gray value to 0; if channel_type selects several channels,
the last matching channel in pixel order wins. */
SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(GetChannelBit(channel_type,channel) == 0))
continue;
SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(separate_image);
}
if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
separate_view=DestroyCacheView(separate_view);
image_view=DestroyCacheView(image_view);
(void) SetImageChannelMask(separate_image,DefaultChannels);
if (status == MagickFalse)
separate_image=DestroyImage(separate_image);
return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Build an image list containing one grayscale image per channel that has
  the update trait; if no channel qualifies, fall back to a single
  UndefinedChannel separation.  The caller owns the returned list.
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *image_list;

  ssize_t
    i,
    number_channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image_list=NewImageList();
  number_channels=(ssize_t) GetPixelChannels(image);
  for (i=0; i < number_channels; i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    channel_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (channel_image != (Image *) NULL)
      AppendImageToList(&image_list,channel_image);
  }
  if (image_list == (Image *) NULL)
    image_list=SeparateImage(image,UndefinedChannel,exception);
  return(image_list);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Composite pixel p (with the given alpha) over pixel q (with alpha beta)
  using the standard "over" operator, writing the result per channel into
  composite[].  Channels other than red/green/blue/black/alpha are left
  untouched.
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  ssize_t
    i;

  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;
  gamma=Sa*(-Da)+Sa+Da;
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      source;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    if (channel == AlphaPixelChannel)
      {
        /* Composite alpha: Sa + Da - Sa*Da, scaled back to quantum range. */
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        continue;
      }
    switch (channel)
    {
      case RedPixelChannel: source=(double) p->red; break;
      case GreenPixelChannel: source=(double) p->green; break;
      case BluePixelChannel: source=(double) p->blue; break;
      case BlackPixelChannel: source=(double) p->black; break;
      default: continue;  /* other channels are not composited */
    }
    composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,source,
      alpha));
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
switch (alpha_type)
{
case ActivateAlphaChannel:
{
/* Flag alpha as active; pixel data is not modified. */
image->alpha_trait=BlendPixelTrait;
break;
}
case AssociateAlphaChannel:
{
/*
Associate alpha: multiply each updatable color channel by the pixel's
normalized alpha (premultiply), then mark alpha as copied.  Note this
case returns directly instead of falling through to the common tail.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
/* The alpha channel itself is left unchanged. */
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=CopyPixelTrait;
return(status);
}
case BackgroundAlphaChannel:
{
/*
Set transparent pixels to background color (alpha stays transparent);
no-op when the image has no alpha channel.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,q) == TransparentAlpha)
{
SetPixelViaPixelInfo(image,&image->background_color,q);
SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case CopyAlphaChannel:
{
/* Derive alpha from pixel intensity via an intensity composite. */
image->alpha_trait=UpdatePixelTrait;
status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
exception);
break;
}
case DeactivateAlphaChannel:
{
/* Ensure alpha data exists, then mark it inactive (copy trait). */
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=CopyPixelTrait;
break;
}
case DisassociateAlphaChannel:
{
/*
Disassociate alpha: divide each updatable color channel by the pixel's
normalized alpha (un-premultiply), then drop the alpha trait.  Like
AssociateAlphaChannel, this case returns directly.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image->alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma,
Sa;
ssize_t
i;
Sa=QuantumScale*GetPixelAlpha(image,q);
/* PerceptibleReciprocal() guards against division by zero alpha. */
gamma=PerceptibleReciprocal(Sa);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=UndefinedPixelTrait;
return(status);
}
case DiscreteAlphaChannel:
{
/* Enable alpha without blending semantics (update trait only). */
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=UpdatePixelTrait;
break;
}
case ExtractAlphaChannel:
{
/* Copy alpha into the color channels, then remove the alpha channel. */
status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OffAlphaChannel:
{
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OnAlphaChannel:
{
/* Enable alpha, initializing it to opaque if absent. */
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=BlendPixelTrait;
break;
}
case OpaqueAlphaChannel:
{
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case RemoveAlphaChannel:
{
/*
Remove transparency: flatten each pixel against the background color
in place, then inherit the background's alpha trait.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
FlattenPixelInfo(image,&image->background_color,
image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=image->background_color.alpha_trait;
break;
}
case SetAlphaChannel:
{
/* Initialize alpha to opaque only if no alpha channel exists yet. */
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case ShapeAlphaChannel:
{
PixelInfo
background;
/*
Shape alpha: replace every pixel with the background color, using the
pixel's intensity as the new alpha value.
*/
ConformPixelInfo(image,&image->background_color,&background,exception);
background.alpha_trait=BlendPixelTrait;
image->alpha_trait=BlendPixelTrait;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=background;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel.alpha=GetPixelIntensity(image,q);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
break;
}
case TransparentAlphaChannel:
{
status=SetImageAlpha(image,TransparentAlpha,exception);
break;
}
case UndefinedAlphaChannel:
break;
}
if (status == MagickFalse)
return(status);
/* Common tail (skipped by the cases that return directly above). */
(void) SetPixelChannelMask(image,image->channel_mask);
return(SyncImagePixelCache(image,exception));
}
|
order-3.c | void foo (void);
int v;
#ifdef __cplusplus
extern "C" {
#endif
int omp_get_thread_num (void);
int omp_get_num_threads (void);
int omp_target_is_present (const void *, int);
int omp_get_cancellation (void);
#ifdef __cplusplus
}
#endif
/*
  f1: each '#pragma omp simd order(concurrent)' loop below nests a construct
  or an OpenMP runtime API call that must be rejected; the expected compiler
  diagnostics are encoded in the dg-error directives on the offending lines.
*/
void
f1 (int *a)
{
int i;
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp parallel /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
int j;
#pragma omp simd
for (j = 0; j < 64; j++)
a[64 * i + j] = i + j;
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp critical /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
v++;
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic read /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
}
/* f2: same checks as f1 but on a combined 'for simd' construct with the
   order(concurrent) clause; the expected diagnostics are identical since
   the simd nesting rules apply.  The dg-error annotations are DejaGnu
   testsuite directives and must stay in sync with the code lines.  */
void
f2 (int *a)
{
  int i;
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp parallel /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
      foo ();
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      int j;
      #pragma omp simd
      for (j = 0; j < 64; j++)
	a[64 * i + j] = i + j;
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp critical /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
      foo ();
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      foo ();
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      v++;
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic read /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
      a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
    }
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
      v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
    }
  /* OpenMP runtime API calls are likewise rejected in order(concurrent).  */
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for simd order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
}
/* f3: 'for' worksharing construct with order(concurrent) but without simd.
   Here nested 'parallel' and 'simd' are valid (no diagnostic expected on
   the first two loops), while other constructs, task/taskloop, and OpenMP
   runtime API calls must be rejected.  The dg-error annotations are DejaGnu
   testsuite directives and must stay in sync with the code lines.  */
void
f3 (int *a)
{
  int i;
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp parallel
      foo ();
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      int j;
      #pragma omp simd
      for (j = 0; j < 64; j++)
	a[64 * i + j] = i + j;
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      foo ();
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      foo ();
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      v++;
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic read /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
      a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
      v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      #pragma omp task /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      a[i]++;
    }
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    {
      int j;
      #pragma omp taskloop /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
      for (j = 0; j < 64; j++)
	a[64 * i + j] = i + j;
    }
  /* OpenMP runtime API calls are likewise rejected in order(concurrent).  */
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
  #pragma omp for order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
}
|
tvl1flow_lib.c |
// This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2011, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.
#ifndef DUAL_TVL1_OPTIC_FLOW_H
#define DUAL_TVL1_OPTIC_FLOW_H
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "mask.c"
#include "bicubic_interpolation.c"
#include "zoom.c"
#define MAX_ITERATIONS 300
#define PRESMOOTHING_SIGMA 0.8
#define GRAD_IS_ZERO 1E-10
/**
* Implementation of the Zach, Pock and Bischof dual TV-L1 optic flow method
*
* see reference:
* [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime
* TV-L1 Optical Flow", In Proceedings of Pattern Recognition (DAGM),
* Heidelberg, Germany, pp. 214-223, 2007
*
*
* Details on the total variation minimization scheme can be found in:
* [2] A. Chambolle, "An Algorithm for Total Variation Minimization and
* Applications", Journal of Mathematical Imaging and Vision, 20: 89-97, 2004
**/
/**
 *
 * Function to compute the optical flow in one scale
 *
 * Runs the primal-dual iterations of [1]: for each of 'warps' warpings of
 * the target image, alternate a pointwise thresholding step (v1, v2), a
 * flow update (u1, u2) using the divergence of the dual variable, and a
 * dual update (p11..p22), until the mean squared change of the flow drops
 * below epsilon^2 or MAX_ITERATIONS is reached.
 *
 * u1, u2 serve both as initialization and as output.
 *
 **/
void Dual_TVL1_optic_flow(
		float *I0,           // source image
		float *I1,           // target image
		float *u1,           // x component of the optical flow
		float *u2,           // y component of the optical flow
		const int   nx,      // image width
		const int   ny,      // image height
		const float tau,     // time step
		const float lambda,  // weight parameter for the data term
		const float theta,   // weight parameter for (u - v)²
		const int   warps,   // number of warpings per scale
		const float epsilon, // tolerance for numerical convergence
		const bool  verbose  // enable/disable the verbose mode
)
{
	const int   size = nx * ny;
	const float l_t  = lambda * theta;

	size_t sf = sizeof(float);

	// all work buffers go through the checked allocator (xmalloc aborts
	// on failure); the original code used an unchecked malloc() for I1x
	// only, and also allocated an unused 'div' buffer (removed here)
	float *I1x    = xmalloc(size*sf);
	float *I1y    = xmalloc(size*sf);
	float *I1w    = xmalloc(size*sf);
	float *I1wx   = xmalloc(size*sf);
	float *I1wy   = xmalloc(size*sf);
	float *rho_c  = xmalloc(size*sf);
	float *v1     = xmalloc(size*sf);
	float *v2     = xmalloc(size*sf);
	float *p11    = xmalloc(size*sf);
	float *p12    = xmalloc(size*sf);
	float *p21    = xmalloc(size*sf);
	float *p22    = xmalloc(size*sf);
	float *grad   = xmalloc(size*sf);
	float *div_p1 = xmalloc(size*sf);
	float *div_p2 = xmalloc(size*sf);
	float *u1x    = xmalloc(size*sf);
	float *u1y    = xmalloc(size*sf);
	float *u2x    = xmalloc(size*sf);
	float *u2y    = xmalloc(size*sf);

	centered_gradient(I1, I1x, I1y, nx, ny);

	// initialization of the dual variable p
	for (int i = 0; i < size; i++)
	{
		p11[i] = p12[i] = 0.0;
		p21[i] = p22[i] = 0.0;
	}

	for (int warpings = 0; warpings < warps; warpings++)
	{
		// compute the warping of the target image and its derivatives
		bicubic_interpolation_warp(I1,  u1, u2, I1w,  nx, ny, true);
		bicubic_interpolation_warp(I1x, u1, u2, I1wx, nx, ny, true);
		bicubic_interpolation_warp(I1y, u1, u2, I1wy, nx, ny, true);

#pragma omp parallel for
		for (int i = 0; i < size; i++)
		{
			const float Ix2 = I1wx[i] * I1wx[i];
			const float Iy2 = I1wy[i] * I1wy[i];

			// store the |Grad(I1)|^2
			grad[i] = (Ix2 + Iy2);

			// compute the constant part of the rho function
			rho_c[i] = (I1w[i] - I1wx[i] * u1[i]
				    - I1wy[i] * u2[i] - I0[i]);
		}

		int n = 0;
		float error = INFINITY;
		while (error > epsilon * epsilon && n < MAX_ITERATIONS)
		{
			n++;

			// estimate the values of the variable (v1, v2)
			// (thresholding operator TH)
#pragma omp parallel for
			for (int i = 0; i < size; i++)
			{
				const float rho = rho_c[i]
					+ (I1wx[i] * u1[i] + I1wy[i] * u2[i]);

				float d1, d2;

				if (rho < - l_t * grad[i])
				{
					d1 = l_t * I1wx[i];
					d2 = l_t * I1wy[i];
				}
				else
				{
					if (rho > l_t * grad[i])
					{
						d1 = -l_t * I1wx[i];
						d2 = -l_t * I1wy[i];
					}
					else
					{
						// |rho| small: least-norm solution,
						// except where the gradient vanishes
						if (grad[i] < GRAD_IS_ZERO)
							d1 = d2 = 0;
						else
						{
							float fi = -rho/grad[i];
							d1 = fi * I1wx[i];
							d2 = fi * I1wy[i];
						}
					}
				}

				v1[i] = u1[i] + d1;
				v2[i] = u2[i] + d2;
			}

			// compute the divergence of the dual variable (p1, p2)
			divergence(p11, p12, div_p1, nx ,ny);
			divergence(p21, p22, div_p2, nx ,ny);

			// estimate the values of the optical flow (u1, u2)
			error = 0.0;
#pragma omp parallel for reduction(+:error)
			for (int i = 0; i < size; i++)
			{
				const float u1k = u1[i];
				const float u2k = u2[i];

				u1[i] = v1[i] + theta * div_p1[i];
				u2[i] = v2[i] + theta * div_p2[i];

				error += (u1[i] - u1k) * (u1[i] - u1k) +
					(u2[i] - u2k) * (u2[i] - u2k);
			}
			error /= size;

			// compute the gradient of the optical flow (Du1, Du2)
			forward_gradient(u1, u1x, u1y, nx ,ny);
			forward_gradient(u2, u2x, u2y, nx ,ny);

			// estimate the values of the dual variable (p1, p2)
#pragma omp parallel for
			for (int i = 0; i < size; i++)
			{
				const float taut = tau / theta;
				const float g1   = hypot(u1x[i], u1y[i]);
				const float g2   = hypot(u2x[i], u2y[i]);
				const float ng1  = 1.0 + taut * g1;
				const float ng2  = 1.0 + taut * g2;

				p11[i] = (p11[i] + taut * u1x[i]) / ng1;
				p12[i] = (p12[i] + taut * u1y[i]) / ng1;
				p21[i] = (p21[i] + taut * u2x[i]) / ng2;
				p22[i] = (p22[i] + taut * u2y[i]) / ng2;
			}
		}

		if (verbose)
			fprintf(stderr, "Warping: %d, "
					"Iterations: %d, "
					"Error: %f\n", warpings, n, error);
	}

	// delete allocated memory
	free(I1x);
	free(I1y);
	free(I1w);
	free(I1wx);
	free(I1wy);
	free(rho_c);
	free(v1);
	free(v2);
	free(p11);
	free(p12);
	free(p21);
	free(p22);
	free(grad);
	free(div_p1);
	free(div_p2);
	free(u1x);
	free(u1y);
	free(u2x);
	free(u2y);
}
/**
 *
 * Compute the max and min of an array
 *
 * Requires n >= 1; results are written through min and max.
 *
 **/
static void getminmax(
	float *min,     // output min
	float *max,     // output max
	const float *x, // input array
	int n           // array size
)
{
	float lo = x[0];
	float hi = x[0];

	for (int i = 1; i < n; i++) {

		const float v = x[i];

		if (v < lo)
			lo = v;

		if (v > hi)
			hi = v;
	}

	*min = lo;
	*max = hi;
}
/**
 *
 * Function to normalize the images between 0 and 255
 *
 * The joint range [min, max] over both input images is mapped linearly
 * onto [0, 255]; when the range is empty (both images constant and equal)
 * the inputs are copied through unchanged.  Requires size >= 1.
 *
 **/
void image_normalization(
	const float *I0, // input image0
	const float *I1, // input image1
	float *I0n,      // normalized output image0
	float *I1n,      // normalized output image1
	int size         // size of the image
)
{
	// joint minimum and maximum over both images
	// (the min/max helper is inlined here)
	float min = I0[0];
	float max = I0[0];

	for (int i = 0; i < size; i++) {

		if (I0[i] < min) min = I0[i];
		if (I0[i] > max) max = I0[i];
		if (I1[i] < min) min = I1[i];
		if (I1[i] > max) max = I1[i];
	}

	const float den = max - min;

	if (den > 0)
		// normalize both images
		for (int i = 0; i < size; i++)
		{
			I0n[i] = 255.0 * (I0[i] - min) / den;
			I1n[i] = 255.0 * (I1[i] - min) / den;
		}
	else
		// copy the original images
		for (int i = 0; i < size; i++)
		{
			I0n[i] = I0[i];
			I1n[i] = I1[i];
		}
}
/**
 *
 * Function to compute the optical flow using multiple scales
 *
 * Builds a pyramid of both images (pre-smoothed and normalized), solves
 * the flow at the coarsest scale starting from zero, then repeatedly
 * upsamples and rescales the solution to initialize the next finer scale.
 * The result for the finest scale is written into u1, u2 directly.
 *
 * NOTE(review): assumes nscales >= 1 and that zfactor shrinks the images
 * (presumably 0 < zfactor < 1 -- confirm against callers/zoom_size).
 *
 **/
void Dual_TVL1_optic_flow_multiscale(
	float *I0,           // source image
	float *I1,           // target image
	float *u1,           // x component of the optical flow
	float *u2,           // y component of the optical flow
	const int   nxx,     // image width
	const int   nyy,     // image height
	const float tau,     // time step
	const float lambda,  // weight parameter for the data term
	const float theta,   // weight parameter for (u - v)²
	const int   nscales, // number of scales
	const float zfactor, // factor for building the image piramid
	const int   warps,   // number of warpings per scale
	const float epsilon, // tolerance for numerical convergence
	const bool  verbose  // enable/disable the verbose mode
)
{
	int size = nxx * nyy;

	// allocate memory for the pyramid structure
	float **I0s = xmalloc(nscales * sizeof(float*));
	float **I1s = xmalloc(nscales * sizeof(float*));
	float **u1s = xmalloc(nscales * sizeof(float*));
	float **u2s = xmalloc(nscales * sizeof(float*));
	int *nx = xmalloc(nscales * sizeof(int));
	int *ny = xmalloc(nscales * sizeof(int));

	// finest level: normalized copies of the inputs; the flow buffers
	// at level 0 alias the caller's u1/u2 (not freed below)
	I0s[0] = xmalloc(size*sizeof(float));
	I1s[0] = xmalloc(size*sizeof(float));

	u1s[0] = u1;
	u2s[0] = u2;
	nx [0] = nxx;
	ny [0] = nyy;

	// normalize the images between 0 and 255
	image_normalization(I0, I1, I0s[0], I1s[0], size);

	// pre-smooth the original images
	gaussian(I0s[0], nx[0], ny[0], PRESMOOTHING_SIGMA);
	gaussian(I1s[0], nx[0], ny[0], PRESMOOTHING_SIGMA);

	// create the scales
	for (int s = 1; s < nscales; s++)
	{
		zoom_size(nx[s-1], ny[s-1], &nx[s], &ny[s], zfactor);
		const int sizes = nx[s] * ny[s];

		// allocate memory
		I0s[s] = xmalloc(sizes*sizeof(float));
		I1s[s] = xmalloc(sizes*sizeof(float));
		u1s[s] = xmalloc(sizes*sizeof(float));
		u2s[s] = xmalloc(sizes*sizeof(float));

		// zoom in the images to create the pyramidal structure
		zoom_out(I0s[s-1], I0s[s], nx[s-1], ny[s-1], zfactor);
		zoom_out(I1s[s-1], I1s[s], nx[s-1], ny[s-1], zfactor);
	}

	// initialize the flow at the coarsest scale
	for (int i = 0; i < nx[nscales-1] * ny[nscales-1]; i++)
		u1s[nscales-1][i] = u2s[nscales-1][i] = 0.0;

	// pyramidal structure for computing the optical flow
	for (int s = nscales-1; s >= 0; s--)
	{
		if (verbose)
			fprintf(stderr, "Scale %d: %dx%d\n", s, nx[s], ny[s]);

		// compute the optical flow at the current scale
		Dual_TVL1_optic_flow(
			I0s[s], I1s[s], u1s[s], u2s[s], nx[s], ny[s],
			tau, lambda, theta, warps, epsilon, verbose
		);

		// if this was the last scale, finish now
		if (!s) break;

		// otherwise, upsample the optical flow

		// zoom the optical flow for the next finer scale
		zoom_in(u1s[s], u1s[s-1], nx[s], ny[s], nx[s-1], ny[s-1]);
		zoom_in(u2s[s], u2s[s-1], nx[s], ny[s], nx[s-1], ny[s-1]);

		// scale the optical flow with the appropriate zoom factor
		// (displacements grow by 1/zfactor when the grid is refined)
		for (int i = 0; i < nx[s-1] * ny[s-1]; i++)
		{
			u1s[s-1][i] *= (float) 1.0 / zfactor;
			u2s[s-1][i] *= (float) 1.0 / zfactor;
		}
	}

	// delete allocated memory (level 0 flow aliases u1/u2: not freed)
	for (int i = 1; i < nscales; i++)
	{
		free(I0s[i]);
		free(I1s[i]);
		free(u1s[i]);
		free(u2s[i]);
	}
	free(I0s[0]);
	free(I1s[0]);

	free(I0s);
	free(I1s);
	free(u1s);
	free(u2s);
	free(nx);
	free(ny);
}
#endif//DUAL_TVL1_OPTIC_FLOW_H
|
fft.c | /* Copyright 2013-2014. The Regents of the University of California.
* Copyright 2016-2018. Martin Uecker.
* Copyright 2018. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2011-2018 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
* 2018 Siddharth Iyer <ssi@mit.edu>
*
*
* FFT. It uses FFTW or CUFFT internally.
*
*
* Gauss, Carl F. 1805. "Nachlass: Theoria Interpolationis Methodo Nova
* Tractata." Werke 3, pp. 265-327, Königliche Gesellschaft der
* Wissenschaften, Göttingen, 1866
*/
#include <assert.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include <fftw3.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/ops.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "fft.h"
#undef fft_plan_s
#ifdef USE_CUDA
#include "num/gpuops.h"
#include "fft-cuda.h"
#define LAZY_CUDA
#endif
/*
 * Scale the data by 1 / sqrt(M), where M is the total number of samples
 * in the dimensions selected by 'flags' (unitary FFT normalization).
 */
void fftscale2(unsigned int N, const long dimensions[N], unsigned long flags, const long ostrides[N], complex float* dst, const long istrides[N], const complex float* src)
{
	long sel_dims[N];
	md_select_dims(N, flags, sel_dims, dimensions);

	long points = md_calc_size(N, sel_dims);
	float scale = 1. / sqrtf((float)points);

	md_zsmul2(N, dimensions, ostrides, dst, istrides, src, scale);
}
/* Non-strided (contiguous) version of fftscale2. */
void fftscale(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dims, CFL_SIZE);

	fftscale2(N, dims, flags, str, dst, str, src);
}
/*
 * Linear phase increment for index j along a dimension of the given
 * length, as used by fftmod2_r to build the modulation ramp.
 */
static double fftmod_phase(long length, int j)
{
	double half = (double)(length / 2);
	double frac = half / (double)length;	// 0.5 for even lengths

	return ((double)j - half / 2.) * frac;
}
/*
 * Recursive worker for fftmod2/ifftmod2.
 *
 * Peels off the highest dimension still selected in 'flags', recursing
 * over its indices while accumulating the per-index phase in 'phase';
 * 'inv' negates the total phase (ifftmod).  Two shortcuts: with no flags
 * left, a single complex scalar multiplication suffices; if only the
 * innermost contiguous chunk remains, md_zfftmod2 handles it directly.
 */
static void fftmod2_r(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src, bool inv, double phase)
{
	if (0 == flags) {

		// no modulated dimensions left: apply accumulated phase
		md_zsmul2(N, dims, ostrs, dst, istrs, src, cexp(M_PI * 2.i * (inv ? -phase : phase)));
		return;
	}

	/* this will also currently be slow on the GPU because we do not
	 * support strides there on the lowest level */

	// highest set flag
	unsigned int i = N - 1;
	while (!MD_IS_SET(flags, i))
		i--;

#if 1
	// If there is only one dimensions left and it is the innermost
	// which is contiguous optimize using md_zfftmod2
	if ((0u == MD_CLEAR(flags, i)) && (1 == md_calc_size(i, dims))
		&& (CFL_SIZE == ostrs[i]) && (CFL_SIZE == istrs[i])) {

		md_zfftmod2(N - i, dims + i, ostrs + i, dst, istrs + i, src, inv, phase);
		return;
	}
#endif

	long tdims[N];
	md_select_dims(N, ~MD_BIT(i), tdims, dims);

	// recurse over dimension i; the byte offsets rely on GCC's
	// void-pointer arithmetic extension (strides are in bytes)
#pragma omp parallel for
	for (int j = 0; j < dims[i]; j++)
		fftmod2_r(N, tdims, MD_CLEAR(flags, i),
			ostrs, (void*)dst + j * ostrs[i], istrs, (void*)src + j * istrs[i],
			inv, phase + fftmod_phase(dims[i], j));
}
/*
 * Drop singleton dimensions from the transform flags: a length-1
 * dimension is unaffected by fftmod and can be skipped.
 */
static unsigned long clear_singletons(unsigned int N, const long dims[N], unsigned long flags)
{
	for (unsigned int i = 0; i < N; i++)
		if (1 == dims[i])
			flags = MD_CLEAR(flags, i);

	return flags;
}
/* Apply the fftmod phase ramp over the flagged (non-singleton) dimensions. */
void fftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, false, 0.);
}

/*
 * The correct usage is fftmod before and after fft and
 * ifftmod before and after ifft (this is different from
 * how fftshift/ifftshift has to be used)
 */
void ifftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	// same as fftmod2 but with negated phase
	fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, true, 0.);
}

/* Non-strided (contiguous) version of fftmod2. */
void fftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long strs[N];
	md_calc_strides(N, strs, dimensions, CFL_SIZE);
	fftmod2(N, dimensions, flags, strs, dst, strs, src);
}

/* Non-strided (contiguous) version of ifftmod2. */
void ifftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long strs[N];
	md_calc_strides(N, strs, dimensions, CFL_SIZE);
	ifftmod2(N, dimensions, flags, strs, dst, strs, src);
}
/*
 * Inverse FFT shift: circularly shift each flagged dimension by
 * ceil(n / 2) (i.e. n - n/2), undoing fftshift for odd lengths too.
 */
void ifftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);
	for (unsigned int i = 0; i < N; i++)
		if (MD_IS_SET(flags, i))
			pos[i] = dims[i] - dims[i] / 2;

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}

/* Non-strided (contiguous) version of ifftshift2. */
void ifftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long strs[N];
	md_calc_strides(N, strs, dimensions, CFL_SIZE);
	ifftshift2(N, dimensions, flags, strs, dst, strs, src);
}

/*
 * FFT shift: circularly shift each flagged dimension by floor(n / 2),
 * moving the DC sample to the center.
 */
void fftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);
	for (unsigned int i = 0; i < N; i++)
		if (MD_IS_SET(flags, i))
			pos[i] = dims[i] / 2;

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}

/* Non-strided (contiguous) version of fftshift2. */
void fftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long strs[N];
	md_calc_strides(N, strs, dimensions, CFL_SIZE);
	fftshift2(N, dimensions, flags, strs, dst, strs, src);
}
/* Operator data backing an FFT operator (CPU plan plus optional CUDA plan). */
struct fft_plan_s {

	INTERFACE(operator_data_t);

	fftwf_plan fftw;	// FFTW plan for the CPU path

#ifdef USE_CUDA
#ifdef LAZY_CUDA
	// geometry kept around so the cuFFT plan can be created lazily on
	// first application with data residing on the GPU (see fft_apply)
	unsigned int D;
	unsigned long flags;
	bool backwards;
	const long* dims;
	const long* istrs;
	const long* ostrs;
#endif
	struct fft_cuda_plan_s* cuplan;	// cuFFT plan; NULL until created
#endif
};

static DEF_TYPEID(fft_plan_s);
/*
 * Build the path of the FFTW wisdom file for a plan with the given rank,
 * direction, flags and dimensions:
 *   $TOOLBOX_PATH/save/fftw/N_<N>_BACKWARD_<b>_FLAGS_<f>_DIMS_<d0>_..._<dk>.fftw
 *
 * Returns NULL when TOOLBOX_PATH is unset; otherwise a heap-allocated
 * string that the caller must free.
 */
static char* fftw_wisdom_name(unsigned int N, bool backwards, unsigned int flags, const long dims[N])
{
	char* tbpath = getenv("TOOLBOX_PATH");

	if (NULL == tbpath)
		return NULL;

	char* loc = NULL;

	// First pass: measure the required buffer size using the
	// snprintf(NULL, 0, ...) idiom.
	size_t space = snprintf(loc, 0, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);

	// Space for dimensions.  The dims are (signed) long, so the
	// conversion is %ld -- the original "%lu" mismatched the argument
	// type, which is undefined behavior for fprintf-family functions.
	for (size_t idx = 0; idx < N; idx ++)
		space += snprintf(loc, 0, "_%ld", dims[idx]);

	// Space for extension.
	space += snprintf(loc, 0, ".fftw");

	// Space for null terminator.
	space += 1;

	loc = calloc(space, sizeof(char));

	if (NULL == loc)
		error("memory out");

	// Second pass: actually assemble the string.
	sprintf(loc , "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);

	char tmp[64];

	for (size_t idx = 0; idx < N; idx++) {

		sprintf(tmp, "_%ld", dims[idx]);
		strcat(loc, tmp);
	}

	sprintf(tmp, ".fftw");
	strcat(loc, tmp);

	loc[space - 1] = '\0';

	return loc;
}
/*
 * Create an FFTW plan through the guru64 interface: dimensions selected
 * by 'flags' are transformed, all others become "howmany" (loop)
 * dimensions.  Strides are given in bytes and converted to units of
 * complex float elements.  Wisdom is imported from / exported to a file
 * below $TOOLBOX_PATH, if that variable is set.
 */
static fftwf_plan fft_fftwf_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards, bool measure)
{
	fftwf_plan fftwf;

	unsigned int N = D;
	fftwf_iodim64 dims[N];
	fftwf_iodim64 hmdims[N];

	unsigned int k = 0;
	unsigned int l = 0;

	char* wisdom = fftw_wisdom_name(D, backwards, flags, dimensions);

	if (NULL != wisdom)
		fftwf_import_wisdom_from_filename(wisdom);

	//FFTW seems to be fine with this
	//assert(0 != flags);

	// split dimensions into transformed (dims) and loop (hmdims) sets
	for (unsigned int i = 0; i < N; i++) {

		if (MD_IS_SET(flags, i)) {

			dims[k].n = dimensions[i];
			dims[k].is = istrides[i] / CFL_SIZE;
			dims[k].os = ostrides[i] / CFL_SIZE;
			k++;

		} else {

			hmdims[l].n = dimensions[i];
			hmdims[l].is = istrides[i] / CFL_SIZE;
			hmdims[l].os = ostrides[i] / CFL_SIZE;
			l++;
		}
	}

	// FFTW planning is not thread-safe
	#pragma omp critical
	fftwf = fftwf_plan_guru64_dft(k, dims, l, hmdims, (complex float*)src, dst,
				backwards ? 1 : (-1), measure ? FFTW_MEASURE : FFTW_ESTIMATE);

	if (NULL != wisdom)
		fftwf_export_wisdom_to_filename(wisdom);

	// NOTE(review): 'wisdom' was allocated with calloc() in
	// fftw_wisdom_name but is released with md_free() -- confirm
	// md_free falls back to free() for CPU pointers, else this mixes
	// allocators.
	md_free(wisdom);

	return fftwf;
}
/*
 * Execute a planned FFT: args[0] is the destination, args[1] the source.
 * On CUDA data the cuFFT plan is created lazily on first use; the cast
 * discards const to store the cached plan in the plan object.
 */
static void fft_apply(const operator_data_t* _plan, unsigned int N, void* args[N])
{
	complex float* dst = args[0];
	const complex float* src = args[1];
	const auto plan = CAST_DOWN(fft_plan_s, _plan);

	assert(2 == N);

#ifdef USE_CUDA
	if (cuda_ondevice(src)) {
#ifdef LAZY_CUDA
		if (NULL == plan->cuplan)
			((struct fft_plan_s*)plan)->cuplan = fft_cuda_plan(plan->D, plan->dims, plan->flags, plan->ostrs, plan->istrs, plan->backwards);
#endif
		assert(NULL != plan->cuplan);

		fft_cuda_exec(plan->cuplan, dst, src);

	} else
#endif
	{
		assert(NULL != plan->fftw);

		fftwf_execute_dft(plan->fftw, (complex float*)src, dst);
	}
}
/* Release an FFT plan: FFTW plan, saved geometry, cuFFT plan, and the
 * plan object itself. */
static void fft_free_plan(const operator_data_t* _data)
{
	const auto plan = CAST_DOWN(fft_plan_s, _data);

	fftwf_destroy_plan(plan->fftw);
#ifdef USE_CUDA
#ifdef LAZY_CUDA
	xfree(plan->dims);
	xfree(plan->istrs);
	xfree(plan->ostrs);
#endif
	if (NULL != plan->cuplan)
		fft_cuda_free_plan(plan->cuplan);
#endif
	xfree(plan);
}
/*
 * Create an FFT operator planned with FFTW_MEASURE.  Scratch buffers of
 * the full size are allocated for planning only and freed again; with
 * 'inplace' the same buffer is used for input and output.
 */
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	complex float* src = md_alloc(D, dimensions, CFL_SIZE);
	complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);

	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);

	md_free(src);

	if (!inplace)
		md_free(dst);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	// NOTE(review): this branch tests 'src' after md_free(src) above --
	// dead code while LAZY_CUDA is defined, but a use-after-free if
	// that define is ever removed.
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
	// save geometry for lazy cuFFT plan creation in fft_apply
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, strides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, strides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
	return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/*
 * Create a strided FFT operator (FFTW_ESTIMATE planning).  'dst'/'src'
 * are only used for planning; the operator can later be applied to any
 * buffers with the same geometry.
 */
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
	// save geometry for lazy cuFFT plan creation in fft_apply
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/* Non-strided (contiguous) version of fft_create2. */
const struct operator_s* fft_create(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src, bool backwards)
{
	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	return fft_create2(D, dimensions, flags, strides, dst, strides, src, backwards);
}

/* Apply a previously created FFT operator. */
void fft_exec(const struct operator_s* o, complex float* dst, const complex float* src)
{
	operator_apply_unchecked(o, dst, src);
}

/* Release an FFT operator. */
void fft_free(const struct operator_s* o)
{
	operator_free(o);
}
/*
 * One-shot FFT helpers: create a plan, execute it once, destroy it.
 * For repeated transforms of the same geometry, create an operator via
 * fft_create/fft_create2 and reuse it with fft_exec instead.
 */

/* Strided forward FFT. */
void fft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* plan = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, false);
	fft_exec(plan, dst, src);
	fft_free(plan);
}

/* Strided inverse FFT (unnormalized). */
void ifft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* plan = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, true);
	fft_exec(plan, dst, src);
	fft_free(plan);
}

/* Contiguous forward FFT. */
void fft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* plan = fft_create(D, dimensions, flags, dst, src, false);
	fft_exec(plan, dst, src);
	fft_free(plan);
}

/* Contiguous inverse FFT (unnormalized). */
void ifft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* plan = fft_create(D, dimensions, flags, dst, src, true);
	fft_exec(plan, dst, src);
	fft_free(plan);
}
/*
 * Centered transforms: fftmod/ifftmod is applied before and after the
 * FFT so that input and output both have the DC sample in the center.
 * The second and third stages operate in-place on dst.
 */

/* Centered forward FFT (contiguous). */
void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	fftmod(D, dimensions, flags, dst, src);
	fft(D, dimensions, flags, dst, dst);
	fftmod(D, dimensions, flags, dst, dst);
}

/* Centered inverse FFT (contiguous, unnormalized). */
void ifftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	ifftmod(D, dimensions, flags, dst, src);
	ifft(D, dimensions, flags, dst, dst);
	ifftmod(D, dimensions, flags, dst, dst);
}

/* Centered forward FFT (strided). */
void fftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	fftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
	fft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
	fftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}

/* Centered inverse FFT (strided, unnormalized). */
void ifftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	ifftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
	ifft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
	ifftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
void fftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
void ifftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
void fftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
void ifftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
void fftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Unitary centered inverse FFT (shared fftscale factor, see ifftu). */
void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Unitary centered forward FFT, strided version of fftuc. */
void fftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); /* in place */
}
/* Unitary centered inverse FFT, strided version of ifftuc. */
void ifftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); /* in place */
}
/* Process-wide guard so fftwf_init_threads() runs at most once.
 * External linkage is kept in case other translation units reference it. */
bool fft_threads_init = false;
/* Set the number of threads FFTW will use for subsequently created plans.
 * No-op (besides consuming n) unless compiled with FFTWTHREADS. */
void fft_set_num_threads(unsigned int n)
{
#ifdef FFTWTHREADS
/* Both unnamed critical sections share one implicit lock, so one-time
 * initialization and thread-count configuration never overlap. */
#pragma omp critical
if (!fft_threads_init) {
fft_threads_init = true;
fftwf_init_threads();
}
#pragma omp critical
fftwf_plan_with_nthreads(n);
#else
UNUSED(n);
#endif
}
|
vector.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include <stdlib.h>
#include <string.h>
#include "../error/error.h"
#include <numa.h>
/**
* Initialize a new value vector
*
* @param vec a valid pointer to an uninitialized sptValueVector variable,
* @param len number of values to create
* @param cap total number of values to reserve
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptNewValueVector(sptValueVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Allocate a zeroed value vector with at least `len` elements.
     * Returns 0 on success, an error code via spt_CheckOSError on OOM. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {  /* minimum capacity matches the growth policy in Append */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-fills and checks cap * element-size for overflow,
     * replacing the previous unchecked malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    spt_CheckOSError(!vec->data, "ValVec New");
    return 0;
}
//Numa
int sptNewValueVectorNuma(sptValueVector *vec, sptNnzIndex len, sptNnzIndex cap, int numa_node) {
    /* Allocate a value vector on a specific NUMA node.
     * Returns 0 on success, an error code via spt_CheckOSError on failure. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* numa_alloc_onnode returns zero-filled, mmap-backed pages, so no
     * explicit memset is needed. */
    vec->data = numa_alloc_onnode(cap * sizeof *vec->data, numa_node);
    /* Fix: the allocation result was previously left unchecked (the check
     * was commented out); a failed numa_alloc_onnode returns NULL. */
    spt_CheckOSError(!vec->data, "ValVec New");
    return 0;
}
/**
* Fill an existed dense value vector with a specified constant
*
* @param vec a valid pointer to an existed sptVector variable,
* @param val a given value constant
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptConstantValueVector(sptValueVector * const vec, sptValue const val) {
    /* Overwrite every element of an initialized vector with `val`. */
    sptNnzIndex remaining = vec->len;
    sptValue *cursor = vec->data;
    while (remaining-- > 0) {
        *cursor++ = val;
    }
    return 0;
}
/**
* Copy a value vector to an uninitialized value vector
*
* @param dest a pointer to an uninitialized value vector
* @param src a pointer to an existing valid value vector
*
* The contents of `src` will be copied to `dest`.
*/
int sptCopyValueVector(sptValueVector *dest, const sptValueVector *src, int const nt) {
/* Allocate dest with exactly src->len elements (len == cap). */
int result = sptNewValueVector(dest, src->len, src->len);
spt_CheckError(result, "ValVec Copy", NULL);
#ifdef PARTI_USE_OPENMP
/* nt = number of OpenMP threads for the parallel copy; ignored otherwise. */
#pragma omp parallel for num_threads(nt)
for (sptNnzIndex i=0; i<src->len; ++i) {
dest->data[i] = src->data[i];
}
#else
memcpy(dest->data, src->data, src->len * sizeof *src->data);
#endif
return 0;
}
/**
* Add a value to the end of a value vector
*
* @param vec a pointer to a valid value vector
* @param value the value to be appended
*
* The length of the value vector will be changed to contain the new value.
*/
int sptAppendValueVector(sptValueVector *vec, sptValue const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
/* grow by 1.5x; cap is never below 2, so cap/2 >= 1 and this always grows */
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
/* exact-fit growth so memory checkers can catch out-of-bounds accesses */
sptNnzIndex newcap = vec->len+1;
#endif
/* realloc into a temporary so vec->data stays valid if realloc fails */
sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
//numa
/* NUMA variant of sptAppendValueVector; vec->data must come from numa_alloc. */
int sptAppendValueVectorNuma(sptValueVector *vec, sptValue const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
sptNnzIndex newcap = vec->len+1;
#endif
/* numa_realloc requires the old allocation size (cap * element size) */
sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
/**
* Add a value to the end of a value vector
*
* @param vec a pointer to a valid value vector
* @param append_vec a pointer to another value vector, containing the values to be appended
*
* The values from `append_vec` will be appended to `vec`.
*/
int sptAppendValueVectorWithVector(sptValueVector *vec, const sptValueVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
/* cap >= len for both vectors, so newcap >= newlen is guaranteed */
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Append ValVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
// With numa
/* NUMA variant of sptAppendValueVectorWithVector (numa_realloc needs old size). */
int sptAppendValueVectorWithVectorNuma(sptValueVector *vec, const sptValueVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Append ValVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
// With numa and start location
/* Copy append_vec into vec starting at offset `start`.
 * NOTE(review): no capacity check and vec->len is not updated — the caller
 * must have pre-sized vec and track the total length itself; confirm intent. */
int sptAppendValueVectorWithVectorStartFromNuma(sptValueVector *vec, const sptValueVector *append_vec, unsigned long long start) {
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[start + i] = append_vec->data[i];
}
return 0;
}
/**
* Resize a value vector
*
* @param vec the value vector to resize
* @param size the new size of the value vector
*
* If the new size is larger than the current size, new values will be appended
* but the values of them are undefined. If the new size if smaller than the
* current size, values at the end will be truncated.
*/
int sptResizeValueVector(sptValueVector *vec, sptNnzIndex const size) {
/* keep the minimum capacity of 2 used everywhere else in this file */
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
/* capacity already matches; only the logical length changes */
vec->len = size;
}
return 0;
}
//numa
/* NUMA variant of sptResizeValueVector (numa_realloc needs the old size). */
int sptResizeValueVectorNuma(sptValueVector *vec, sptNnzIndex const size) {
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "ValVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
vec->len = size;
}
return 0;
}
/**
* Release the memory buffer a value vector is holding
*
* @param vec a pointer to a valid value vector
*
*/
void sptFreeValueVector(sptValueVector *vec) {
    /* Release the buffer and reset the vector to an empty, reusable state. */
    free(vec->data);
    vec->data = NULL;  /* guard against double-free / stale-pointer reuse */
    vec->len = 0;
    vec->cap = 0;
}
/*
* Initialize a new sptIndex vector
*
* @param vec a valid pointer to an uninitialized sptIndex variable,
* @param len number of values to create
* @param cap total number of values to reserve
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptNewIndexVector(sptIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Allocate a zeroed index vector with at least `len` elements. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {  /* minimum capacity matches the growth policy in Append */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-fills and checks cap * element-size for overflow,
     * replacing the previous unchecked malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    spt_CheckOSError(!vec->data, "IdxVec New");
    return 0;
}
//Numa
int sptNewIndexVectorNuma(sptIndexVector *vec, sptNnzIndex len, sptNnzIndex cap, int numa_node) {
    /* Allocate an index vector on a specific NUMA node. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* numa_alloc_onnode returns zero-filled, mmap-backed pages, so no
     * explicit memset is needed. */
    vec->data = numa_alloc_onnode(cap * sizeof *vec->data, numa_node);
    /* Fix: the allocation result was previously left unchecked (the check
     * was commented out); a failed numa_alloc_onnode returns NULL. */
    spt_CheckOSError(!vec->data, "IdxVec New");
    return 0;
}
/**
* Fill an existed dense index vector with a specified constant
*
* @param vec a valid pointer to an existed sptIndexVector variable,
* @param num a given value constant
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptConstantIndexVector(sptIndexVector * const vec, sptIndex const num) {
    /* Overwrite every element of an initialized vector with `num`. */
    sptNnzIndex remaining = vec->len;
    sptIndex *cursor = vec->data;
    while (remaining-- > 0) {
        *cursor++ = num;
    }
    return 0;
}
/**
* Copy an index vector to an uninitialized index vector
*
* @param dest a pointer to an uninitialized index vector
* @param src a pointer to an existing valid index vector
*
* The contents of `src` will be copied to `dest`.
*/
int sptCopyIndexVector(sptIndexVector *dest, const sptIndexVector *src, int const nt) {
/* Allocate dest with exactly src->len elements (len == cap). */
int result = sptNewIndexVector(dest, src->len, src->len);
spt_CheckError(result, "IdxVec Copy", NULL);
#ifdef PARTI_USE_OPENMP
/* nt = number of OpenMP threads for the parallel copy; ignored otherwise. */
#pragma omp parallel for num_threads(nt)
for (sptNnzIndex i=0; i<src->len; ++i) {
dest->data[i] = src->data[i];
}
#else
memcpy(dest->data, src->data, src->len * sizeof *src->data);
#endif
return 0;
}
/**
* Add a value to the end of a sptIndexVector
*
* @param vec a pointer to a valid index vector
* @param value the value to be appended
*
* The length of the size vector will be changed to contain the new value.
*/
int sptAppendIndexVector(sptIndexVector *vec, sptIndex const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
/* grow by 1.5x; cap is never below 2, so this always increases */
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
/* exact-fit growth so memory checkers can catch out-of-bounds accesses */
sptNnzIndex newcap = vec->len+1;
#endif
/* realloc into a temporary so vec->data stays valid if realloc fails */
sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
//numa
/* NUMA variant of sptAppendIndexVector; vec->data must come from numa_alloc. */
int sptAppendIndexVectorNuma(sptIndexVector *vec, sptIndex const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
sptNnzIndex newcap = vec->len+1;
#endif
/* numa_realloc requires the old allocation size (cap * element size) */
sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
/**
* Add a value to the end of an index vector
*
* @param vec a pointer to a valid index vector
* @param append_vec a pointer to another index vector, containing the values to be appended
*
* The values from `append_vec` will be appended to `vec`.
*/
int sptAppendIndexVectorWithVector(sptIndexVector *vec, const sptIndexVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
/* cap >= len for both vectors, so newcap >= newlen is guaranteed */
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Append IdxVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
//numa
/* NUMA variant of sptAppendIndexVectorWithVector (numa_realloc needs old size). */
int sptAppendIndexVectorWithVectorNuma(sptIndexVector *vec, const sptIndexVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Append IdxVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
//numa
/* Copy append_vec into vec starting at offset `start`.
 * NOTE(review): no capacity check and vec->len is not updated — the caller
 * must have pre-sized vec and track the total length itself; confirm intent. */
int sptAppendIndexVectorWithVectorStartFromNuma(sptIndexVector *vec, const sptIndexVector *append_vec, unsigned long long start) {
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[start + i] = append_vec->data[i];
}
return 0;
}
/**
* Resize an index vector
*
* @param vec the index vector to resize
* @param size the new size of the index vector
*
* If the new size is larger than the current size, new values will be appended
* but the values of them are undefined. If the new size if smaller than the
* current size, values at the end will be truncated.
*/
int sptResizeIndexVector(sptIndexVector *vec, sptNnzIndex const size) {
/* keep the minimum capacity of 2 used everywhere else in this file */
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
/* capacity already matches; only the logical length changes */
vec->len = size;
}
return 0;
}
//numa
/* NUMA variant of sptResizeIndexVector (numa_realloc needs the old size). */
int sptResizeIndexVectorNuma(sptIndexVector *vec, sptNnzIndex const size) {
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "IdxVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
vec->len = size;
}
return 0;
}
/* Linear search for the coordinate tuple in cand_inds among the nnz entries
 * of the per-mode index vectors inds[0..nmodes-1]; returns the matching
 * position or -1 if absent.
 * NOTE(review): the position is returned as long int, so very large
 * sptNnzIndex values could truncate — confirm nnz stays within range. */
long int sptInIndexVector(sptIndexVector * inds, sptNnzIndex nmodes, sptNnzIndex nnz, sptIndexVector * cand_inds) {
int mark;
for (sptNnzIndex i = 0; i < nnz; ++i) {
mark = 1;
for(sptIndex m = 0; m < nmodes; ++m) {
if(cand_inds->data[m] != inds[m].data[i] ) {
mark = 0;
break; // no need to compare other modes
}
}
if (mark == 1) return i;
}
return -1;
}
/**
* Release the memory buffer a sptIndexVector is holding
*
* @param vec a pointer to a valid size vector
*
*/
void sptFreeIndexVector(sptIndexVector *vec) {
    /* Release the buffer and reset the vector to an empty, reusable state. */
    free(vec->data);
    vec->data = NULL;  /* guard against double-free / stale-pointer reuse */
    vec->len = 0;
    vec->cap = 0;
}
/*
* Initialize a new sptElementIndexVector vector
*
* @param vec a valid pointer to an uninitialized sptElementIndex variable,
* @param len number of values to create
* @param cap total number of values to reserve
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptNewElementIndexVector(sptElementIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Allocate a zeroed element-index vector with at least `len` elements. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {  /* minimum capacity matches the growth policy in Append */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-fills and checks cap * element-size for overflow,
     * replacing the previous unchecked malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    spt_CheckOSError(!vec->data, "EleIdxVec New");
    return 0;
}
/**
* Fill an existed dense element index vector with a specified constant
*
* @param vec a valid pointer to an existed sptElementIndexVector variable,
* @param num a given value constant
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptConstantElementIndexVector(sptElementIndexVector * const vec, sptElementIndex const num) {
    /* Overwrite every element of an initialized vector with `num`. */
    sptNnzIndex remaining = vec->len;
    sptElementIndex *cursor = vec->data;
    while (remaining-- > 0) {
        *cursor++ = num;
    }
    return 0;
}
/**
* Copy an element index vector to an uninitialized element index vector
*
* @param dest a pointer to an uninitialized element index vector
* @param src a pointer to an existing valid element index vector
*
* The contents of `src` will be copied to `dest`.
*/
/* Serial copy (no OpenMP variant exists for element indices). */
int sptCopyElementIndexVector(sptElementIndexVector *dest, const sptElementIndexVector *src) {
/* dest is allocated with exactly src->len elements (len == cap) */
int result = sptNewElementIndexVector(dest, src->len, src->len);
spt_CheckError(result, "EleIdxVec Copy", NULL);
memcpy(dest->data, src->data, src->len * sizeof *src->data);
return 0;
}
/**
* Add a value to the end of a sptElementIndexVector
*
* @param vec a pointer to a valid element index vector
* @param value the value to be appended
*
* The length of the element index vector will be changed to contain the new value.
*/
int sptAppendElementIndexVector(sptElementIndexVector *vec, sptElementIndex const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
/* grow by 1.5x; cap is never below 2, so this always increases */
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
/* exact-fit growth so memory checkers can catch out-of-bounds accesses */
sptNnzIndex newcap = vec->len+1;
#endif
/* realloc into a temporary so vec->data stays valid if realloc fails */
sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "EleIdxVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
/**
* Add a value to the end of an element index vector
*
* @param vec a pointer to a valid element index vector
* @param append_vec a pointer to another element index vector, containing the values to be appended
*
* The values from `append_vec` will be appended to `vec`.
*/
int sptAppendElementIndexVectorWithVector(sptElementIndexVector *vec, const sptElementIndexVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
/* cap >= len for both vectors, so newcap >= newlen is guaranteed */
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "EleIdxVec Append EleIdxVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
/**
* Resize a element index vector
*
* @param vec the element index vector to resize
* @param size the new size of the element index vector
*
* If the new size is larger than the current size, new values will be appended
* but the values of them are undefined. If the new size if smaller than the
* current size, values at the end will be truncated.
*/
int sptResizeElementIndexVector(sptElementIndexVector *vec, sptNnzIndex const size) {
/* keep the minimum capacity of 2 used everywhere else in this file */
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "EleIdxVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
/* capacity already matches; only the logical length changes */
vec->len = size;
}
return 0;
}
/**
* Release the memory buffer a sptElementIndexVector is holding
*
* @param vec a pointer to a valid size vector
*
*/
void sptFreeElementIndexVector(sptElementIndexVector *vec) {
    /* Release the buffer and reset the vector to an empty, reusable state. */
    free(vec->data);
    vec->data = NULL;  /* guard against double-free / stale-pointer reuse */
    vec->len = 0;
    vec->cap = 0;
}
/*
* Initialize a new sptBlockIndexVector vector
*
* @param vec a valid pointer to an uninitialized sptBlockIndex variable,
* @param len number of values to create
* @param cap total number of values to reserve
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptNewBlockIndexVector(sptBlockIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Allocate a zeroed block-index vector with at least `len` elements. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {  /* minimum capacity matches the growth policy in Append */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-fills and checks cap * element-size for overflow,
     * replacing the previous unchecked malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    spt_CheckOSError(!vec->data, "BlkIdxVec New");
    return 0;
}
/**
* Fill an existed dense element index vector with a specified constant
*
* @param vec a valid pointer to an existed sptBlockIndexVector variable,
* @param num a given value constant
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptConstantBlockIndexVector(sptBlockIndexVector * const vec, sptBlockIndex const num) {
    /* Overwrite every element of an initialized vector with `num`. */
    sptNnzIndex remaining = vec->len;
    sptBlockIndex *cursor = vec->data;
    while (remaining-- > 0) {
        *cursor++ = num;
    }
    return 0;
}
/**
* Copy a block index vector to an uninitialized block index vector
*
* @param dest a pointer to an uninitialized block index vector
* @param src a pointer to an existing valid block index vector
*
* The contents of `src` will be copied to `dest`.
*/
/* Serial copy (no OpenMP variant exists for block indices). */
int sptCopyBlockIndexVector(sptBlockIndexVector *dest, const sptBlockIndexVector *src) {
/* dest is allocated with exactly src->len elements (len == cap) */
int result = sptNewBlockIndexVector(dest, src->len, src->len);
spt_CheckError(result, "BlkIdxVec Copy", NULL);
memcpy(dest->data, src->data, src->len * sizeof *src->data);
return 0;
}
/**
* Add a value to the end of a sptBlockIndexVector
*
* @param vec a pointer to a valid block index vector
* @param value the value to be appended
*
* The length of the block index vector will be changed to contain the new value.
*/
int sptAppendBlockIndexVector(sptBlockIndexVector *vec, sptBlockIndex const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
/* grow by 1.5x; cap is never below 2, so this always increases */
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
/* exact-fit growth so memory checkers can catch out-of-bounds accesses */
sptNnzIndex newcap = vec->len+1;
#endif
/* realloc into a temporary so vec->data stays valid if realloc fails */
sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "BlkIdxVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
/**
* Add a value to the end of a block index vector
*
* @param vec a pointer to a valid block index vector
* @param append_vec a pointer to another block index vector, containing the values to be appended
*
* The values from `append_vec` will be appended to `vec`.
*/
int sptAppendBlockIndexVectorWithVector(sptBlockIndexVector *vec, const sptBlockIndexVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
/* cap >= len for both vectors, so newcap >= newlen is guaranteed */
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "BlkIdxVec Append BlkIdxVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
/**
* Resize a block index vector
*
* @param vec the block index vector to resize
* @param size the new size of the block index vector
*
* If the new size is larger than the current size, new values will be appended
* but the values of them are undefined. If the new size if smaller than the
* current size, values at the end will be truncated.
*/
int sptResizeBlockIndexVector(sptBlockIndexVector *vec, sptNnzIndex const size) {
/* keep the minimum capacity of 2 used everywhere else in this file */
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "BlkIdxVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
/* capacity already matches; only the logical length changes */
vec->len = size;
}
return 0;
}
/**
* Release the memory buffer a sptBlockIndexVector is holding
*
* @param vec a pointer to a valid size vector
*
*/
void sptFreeBlockIndexVector(sptBlockIndexVector *vec) {
    /* Release the buffer and reset the vector to an empty, reusable state. */
    free(vec->data);
    vec->data = NULL;  /* guard against double-free / stale-pointer reuse */
    vec->len = 0;
    vec->cap = 0;
}
/*
* Initialize a new sptNnzIndexVector vector
*
* @param vec a valid pointer to an uninitialized sptNnzIndex variable,
* @param len number of values to create
* @param cap total number of values to reserve
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptNewNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Allocate a zeroed nnz-index vector with at least `len` elements. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {  /* minimum capacity matches the growth policy in Append */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-fills and checks cap * element-size for overflow,
     * replacing the previous unchecked malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    spt_CheckOSError(!vec->data, "NnzIdxVec New");
    return 0;
}
/**
* Fill an existed dense long nnz index vector with a specified constant
*
* @param vec a valid pointer to an existed sptNnzIndexVector variable,
* @param num a given value constant
*
* Vector is a type of one-dimentional array with dynamic length
*/
int sptConstantNnzIndexVector(sptNnzIndexVector * const vec, sptNnzIndex const num) {
    /* Overwrite every element of an initialized vector with `num`. */
    sptNnzIndex remaining = vec->len;
    sptNnzIndex *cursor = vec->data;
    while (remaining-- > 0) {
        *cursor++ = num;
    }
    return 0;
}
/**
* Copy a long nnz index vector to an uninitialized long nnz index vector
*
* @param dest a pointer to an uninitialized long nnz index vector
* @param src a pointer to an existing valid long nnz index vector
*
* The contents of `src` will be copied to `dest`.
*/
/* Serial copy (no OpenMP variant exists for nnz indices). */
int sptCopyNnzIndexVector(sptNnzIndexVector *dest, const sptNnzIndexVector *src) {
/* dest is allocated with exactly src->len elements (len == cap) */
int result = sptNewNnzIndexVector(dest, src->len, src->len);
spt_CheckError(result, "NnzIdxVec Copy", NULL);
memcpy(dest->data, src->data, src->len * sizeof *src->data);
return 0;
}
/**
* Add a value to the end of a sptNnzIndexVector
*
* @param vec a pointer to a valid long nnz index vector
* @param value the value to be appended
*
* The length of the long nnz index vector will be changed to contain the new value.
*/
int sptAppendNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex const value) {
if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
/* grow by 1.5x; cap is never below 2, so this always increases */
sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
/* exact-fit growth so memory checkers can catch out-of-bounds accesses */
sptNnzIndex newcap = vec->len+1;
#endif
/* realloc into a temporary so vec->data stays valid if realloc fails */
sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "NnzIdxVec Append");
vec->cap = newcap;
vec->data = newdata;
}
vec->data[vec->len] = value;
++vec->len;
return 0;
}
/**
* Add a value to the end of a long nnz index vector
*
* @param vec a pointer to a valid long nnz index vector
* @param append_vec a pointer to another long nnz index vector, containing the values to be appended
*
* The values from `append_vec` will be appended to `vec`.
*/
int sptAppendNnzIndexVectorWithVector(sptNnzIndexVector *vec, const sptNnzIndexVector *append_vec) {
sptNnzIndex newlen = vec->len + append_vec->len;
if(vec->cap <= newlen) {
/* cap >= len for both vectors, so newcap >= newlen is guaranteed */
sptNnzIndex newcap = vec->cap + append_vec->cap;
sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "NnzIdxVec Append NnzIdxVec");
vec->cap = newcap;
vec->data = newdata;
}
for(sptNnzIndex i=0; i<append_vec->len; ++i) {
vec->data[vec->len + i] = append_vec->data[i];
}
vec->len = newlen;
return 0;
}
/**
* Resize a long nnz index vector
*
* @param vec the long nnz index vector to resize
* @param size the new size of the long nnz index vector
*
* If the new size is larger than the current size, new values will be appended
* but the values of them are undefined. If the new size if smaller than the
* current size, values at the end will be truncated.
*/
int sptResizeNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex const size) {
/* keep the minimum capacity of 2 used everywhere else in this file */
sptNnzIndex newcap = size < 2 ? 2 : size;
if(newcap != vec->cap) {
sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
spt_CheckOSError(!newdata, "NnzIdxVec Resize");
vec->len = size;
vec->cap = newcap;
vec->data = newdata;
} else {
/* capacity already matches; only the logical length changes */
vec->len = size;
}
return 0;
}
/**
* Release the memory buffer a sptNnzIndexVector is holding
*
* @param vec a pointer to a valid long nnz vector
*
*/
void sptFreeNnzIndexVector(sptNnzIndexVector *vec) {
    /* Release the buffer and reset the vector to an empty, reusable state. */
    free(vec->data);
    vec->data = NULL;  /* guard against double-free / stale-pointer reuse */
    vec->len = 0;
    vec->cap = 0;
}
|
bincrs_func.c | #define _XOPEN_SOURCE 500
#include <stdlib.h>
#include <stdio.h>
#include "ghost/util.h"
#include "ghost/bincrs.h"
#include "ghost/machine.h"
#include "ghost/locality.h"
/* Reverse the byte order of a 32-bit word. */
static inline uint32_t bswap_32(uint32_t val)
{
    uint32_t out = 0;
    for (int i = 0; i < 4; i++) {
        out = (out << 8) | (val & 0xffu);  /* peel lowest byte into out */
        val >>= 8;
    }
    return out;
}
/* Reverse the byte order of a 64-bit word. */
static inline uint64_t bswap_64(uint64_t val)
{
    uint64_t out = 0;
    for (int i = 0; i < 8; i++) {
        out = (out << 8) | (val & 0xffULL);  /* peel lowest byte into out */
        val >>= 8;
    }
    return out;
}
/* Nonzero iff the file's endianness differs from the machine's, i.e. byte
 * swapping is required. Fully parenthesized: the original left the macro
 * argument unparenthesized and relied on nested `?:` precedence, an
 * operator-precedence hazard for any non-trivial argument. */
#define SWAPREQ(header) (((header).endianess == GHOST_BINCRS_LITTLE_ENDIAN) ? (ghost_machine_bigendian() ? 1 : 0) : (ghost_machine_bigendian() ? 0 : 1))
/* Row callback used to assemble a sparse matrix from a binary CRS file.
 * `row` doubles as a command code: GETDIM reports matrix dimensions through
 * col[0..1] (and the file datatype through *rowlen), INIT reads this rank's
 * row-pointer/column/value data into static buffers, FINALIZE releases them,
 * and any other value answers a query for one local row.
 * NOTE(review): the static state means only one file can be in flight at a
 * time and the function is not thread-safe across matrices. */
int ghost_sparsemat_rowfunc_bincrs(ghost_gidx row, ghost_lidx *rowlen, ghost_gidx *col, void *val, void *arg)
{
GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO);
static ghost_gidx *colInd = NULL, *globalRowPtr = NULL, *rowPtr = NULL;
static char *values = NULL;
static size_t dtsize = 0;
static ghost_gidx firstrow = 0;
static ghost_lidx nrows = 0;
if (row == GHOST_SPARSEMAT_ROWFUNC_BINCRS_ROW_GETDIM) {
ghost_bincrs_header_t header;
ghost_sparsemat_rowfunc_file_initargs args =
*(ghost_sparsemat_rowfunc_file_initargs *)arg;
char *filename = args.filename;
ghost_bincrs_header_read(&header,filename);
/* dimensions go out through col[]; rowlen carries the datatype code */
col[0] = header.nrows;
col[1] = header.ncols;
if(rowlen) *rowlen = header.datatype;
} else if (row == GHOST_SPARSEMAT_ROWFUNC_INIT) {
ghost_sparsemat_rowfunc_file_initargs args =
*(ghost_sparsemat_rowfunc_file_initargs *)arg;
char *filename = args.filename;
ghost_datatype matdt = args.dt;
ghost_datatype_size(&dtsize,matdt);
ghost_bincrs_header_t header;
ghost_bincrs_header_read(&header,filename);
FILE *f;
ghost_gidx i;
size_t ret;
if ((f = fopen64(filename,"r")) == NULL) {
GHOST_ERROR_LOG("fopen with %s failed!",filename);
return 1;
}
/* NOTE(review): every early return below leaks `f`; goto-style cleanup
 * would close it on all paths. Also, no byte swapping is applied to the
 * row pointers / columns / values read here, so presumably the file
 * endianness must match the machine — confirm (header_read only swaps
 * the header fields). */
if ((ghost_datatype)(header.datatype) != matdt) {
GHOST_ERROR_LOG("Value casting not implemented! Adjust your sparsemat datatype to match the file!");
return 1;
}
if (header.symmetry != GHOST_BINCRS_SYMM_GENERAL) {
GHOST_ERROR_LOG("Only general matrices supported at the moment!");
return 1;
}
if (fseeko(f,GHOST_BINCRS_SIZE_HEADER,SEEK_SET)) {
GHOST_ERROR_LOG("Seek failed");
return GHOST_ERR_IO;
}
int me;
ghost_sparsemat *mat = args.mat;
ghost_rank(&me,mat->context->mpicomm);
/* this rank owns the row range [firstrow, firstrow + nrows) */
firstrow = mat->context->row_map->goffs[me];
nrows = mat->context->row_map->ldim[me];
ghost_malloc((void **)&rowPtr,(nrows + 1) * sizeof(ghost_gidx));
/* skip the row pointers of the preceding ranks, then read nrows+1 */
if (fseeko(f,firstrow*GHOST_BINCRS_SIZE_RPT_EL,SEEK_CUR)) {
GHOST_ERROR_LOG("Seek failed");
return GHOST_ERR_IO;
}
if ((ret = fread(rowPtr, GHOST_BINCRS_SIZE_RPT_EL, nrows+1,f)) != (size_t)(nrows+1)){
GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
return GHOST_ERR_IO;
}
ghost_lidx nnz = (ghost_lidx)(rowPtr[nrows]-rowPtr[0]);
ghost_malloc((void **)&colInd,nnz * sizeof(ghost_gidx));
ghost_malloc((void **)&values,nnz * dtsize);
/* touch the first entry of each row in parallel — presumably for NUMA
 * first-touch page placement; confirm */
#pragma omp parallel for
for(i=0; i < nrows; ++i){
values[rowPtr[i]-rowPtr[0]] = 0;
colInd[rowPtr[i]-rowPtr[0]] = 0;
}
/* column indices: header + full global row-pointer array + the columns
 * belonging to preceding ranks (rowPtr[0] entries) */
if (fseeko(f,GHOST_BINCRS_SIZE_HEADER+(header.nrows+1)*GHOST_BINCRS_SIZE_RPT_EL+rowPtr[0]*GHOST_BINCRS_SIZE_COL_EL,SEEK_SET)) {
GHOST_ERROR_LOG("Seek failed");
return GHOST_ERR_IO;
}
if ((ret = fread(colInd, GHOST_BINCRS_SIZE_COL_EL, nnz,f)) != (size_t)(nnz)){
GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
return GHOST_ERR_IO;
}
/* values: located after all header.nnz column indices in the file */
if (fseeko(f,GHOST_BINCRS_SIZE_HEADER+(header.nrows+1)*GHOST_BINCRS_SIZE_RPT_EL+header.nnz*GHOST_BINCRS_SIZE_COL_EL+rowPtr[0]*dtsize,SEEK_SET)) {
GHOST_ERROR_LOG("Seek failed");
return GHOST_ERR_IO;
}
if ((ret = fread(values, dtsize, nnz,f)) != (size_t)(nnz)){
GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
return GHOST_ERR_IO;
}
fclose(f);
} else if (row == GHOST_SPARSEMAT_ROWFUNC_FINALIZE) {
/* globalRowPtr is never assigned in this function; free(NULL) is a no-op */
free(colInd);
free(rowPtr);
free(globalRowPtr);
free(values);
} else {
/* regular query: copy one local row's column indices and raw values */
*rowlen = rowPtr[row-firstrow+1]-rowPtr[row-firstrow];
memcpy(col,&colInd[rowPtr[row-firstrow]-rowPtr[0]],(*rowlen)*sizeof(ghost_gidx));
memcpy(val,&values[(rowPtr[row-firstrow]-rowPtr[0])*dtsize],(*rowlen)*dtsize);
}
GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO);
return 0;
}
/*
 * Read the header of a binary CRS (BinCRS) matrix file into *header.
 *
 * The fixed-layout header fields are read one by one and byte-swapped when
 * the file's endianess differs from the host's. After all fields are read,
 * the actual file size is checked against the size implied by the header
 * (nrows, nnz, and the datatype's element size).
 *
 * Returns GHOST_SUCCESS on success, GHOST_ERR_IO on any open/read/size
 * failure, or the error from ghost_datatype_size() for a bad datatype.
 *
 * BUGFIX: every error path previously returned without fclose(file),
 * leaking the stream. All read-failure paths now go through err_close,
 * and the file is closed before the post-read validation (so the
 * GHOST_CALL_RETURN inside the validation can no longer leak it).
 */
ghost_error ghost_bincrs_header_read(ghost_bincrs_header_t *header, char *matrixPath)
{
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL|GHOST_FUNCTYPE_IO);
    FILE* file;
    long filesize;
    int swapReq = 0;
    size_t ret;

    GHOST_DEBUG_LOG(1,"Reading header from %s",matrixPath);
    if ((file = fopen(matrixPath, "rb")) == NULL) {
        GHOST_ERROR_LOG("Could not open binary CRS file %s: %s",matrixPath,strerror(errno));
        return GHOST_ERR_IO;
    }

    /* Record the total file size for the consistency check below. */
    fseek(file,0L,SEEK_END);
    filesize = ftell(file);
    fseek(file,0L,SEEK_SET);

    if ((ret = fread(&header->endianess, 4, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    /* Decide whether the on-disk byte order differs from the host's. */
    if (header->endianess == GHOST_BINCRS_LITTLE_ENDIAN && ghost_machine_bigendian()) {
        GHOST_DEBUG_LOG(1,"Need to convert from little to big endian.");
        swapReq = 1;
    } else if (header->endianess != GHOST_BINCRS_LITTLE_ENDIAN && !ghost_machine_bigendian()) {
        GHOST_DEBUG_LOG(1,"Need to convert from big to little endian.");
        swapReq = 1;
    } else {
        GHOST_DEBUG_LOG(1,"OK, file and library have same endianess.");
    }

    /* 32-bit header fields: version, index base, symmetry, datatype. */
    if ((ret = fread(&header->version, 4, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->version = bswap_32(header->version);

    if ((ret = fread(&header->base, 4, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->base = bswap_32(header->base);

    if ((ret = fread(&header->symmetry, 4, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->symmetry = bswap_32(header->symmetry);

    if ((ret = fread(&header->datatype, 4, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->datatype = bswap_32(header->datatype);

    /* 64-bit header fields: matrix dimensions and nonzero count. */
    if ((ret = fread(&header->nrows, 8, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->nrows = bswap_64(header->nrows);

    if ((ret = fread(&header->ncols, 8, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->ncols = bswap_64(header->ncols);

    if ((ret = fread(&header->nnz, 8, 1, file)) != (size_t)1) {
        GHOST_ERROR_LOG("fread failed: %s (%zu)",strerror(errno),ret);
        goto err_close;
    }
    if (swapReq) header->nnz = bswap_64(header->nnz);

    /* All header bytes are in; close before validation so no later
     * early return (including GHOST_CALL_RETURN) can leak the stream. */
    fclose(file);

    /* Validate the physical file size against what the header implies. */
    size_t valSize;
    GHOST_CALL_RETURN(ghost_datatype_size(&valSize,(ghost_datatype)header->datatype));
    long rightFilesize = GHOST_BINCRS_SIZE_HEADER +
        (long)(header->nrows+1) * GHOST_BINCRS_SIZE_RPT_EL +
        (long)header->nnz * GHOST_BINCRS_SIZE_COL_EL +
        (long)header->nnz * valSize;
    if (filesize != rightFilesize) {
        GHOST_ERROR_LOG("File has invalid size! (is: %ld, should be: %ld)",filesize, rightFilesize);
        return GHOST_ERR_IO;
    }

    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL|GHOST_FUNCTYPE_IO);
    return GHOST_SUCCESS;

err_close:
    fclose(file);
    return GHOST_ERR_IO;
}
|
GB_binop__bset_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int32)
// C=scalar+B GB (_bind1st__bset_int32)
// C=scalar+B' GB (_bind1st_tran__bset_int32)
// C=A+scalar GB (_bind2nd__bset_int32)
// C=A'+scalar GB (_bind2nd_tran__bset_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using the bset int32
// operator; the actual loop lives in the shared template file.
GrB_Info GB (_Cdense_ewise3_noaccum__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// GrB_NO_VALUE tells the caller to fall back to the generic kernel.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C,
// parallelized over the precomputed ek-slicing of B.
GrB_Info GB (_Cdense_accumB__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__bset_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): the return below is unreachable (the inner block always
// returns); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked with M or !M), where entries present
// in only one of A/B are copied through and entries present in both are
// combined with the bset int32 operator; the loop is in GB_add_template.c.
GrB_Info GB (_AaddB__bset_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces for slicing M, A, and B; released by GB_FREE_WORK below.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse; the task-parallel loop is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bset_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 1 for bset (non-commutative, no
// flipped variant), both argument orders are instantiated from the same
// template via the GB_FLIPPED macro.
GrB_Info GB (_AemultB_02__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full, parallelized over the ek-slicing of M.
GrB_Info GB (_AemultB_04__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the
// result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__bset_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = op (x, Bx [p]) for all bnz entries, with the scalar x
// bound as the first operand. Bb is B's bitmap (may be NULL per the GBB
// macro); entries not present in the bitmap are skipped.
GrB_Info GB (_bind1st__bset_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = op (Ax [p], y) for all anz entries, with the scalar y
// bound as the second operand. Ab is A's bitmap (may be NULL per the GBB
// macro); entries not present in the bitmap are skipped.
GrB_Info GB (_bind2nd__bset_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \
}
// bind1st with transpose: C = op (x, A'), applying the operator (via the
// GB_CAST_OP macro defined just above) while transposing A.
GrB_Info GB (_bind1st_tran__bset_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \
}
// bind2nd with transpose: C = op (A', y), applying the operator (via the
// GB_CAST_OP macro defined just above) while transposing A.
GrB_Info GB (_bind2nd_tran__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_device.4.c | /*
* @@name: device.6c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.5
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Fill mem[0..s-1] with cos(i) computed on the default offload device:
 * allocate device memory, copy host data in, run the kernel on the
 * device, copy the result back, and free the device buffer.
 * Exits the program if no device is available or allocation fails.
 */
void get_dev_cos(double *mem, size_t s)
{
int h, t, i;
double * mem_dev_cpy;
h = omp_get_initial_device(); /* host device number */
t = omp_get_default_device(); /* target (offload) device number */
if (omp_get_num_devices() < 1 || t < 0){
printf(" ERROR: No device found.\n");
exit(1);
}
mem_dev_cpy = (double *)omp_target_alloc( sizeof(double) * s, t);
if(mem_dev_cpy == NULL){
printf(" ERROR: No space left on device.\n");
exit(1);
}
/* dst src */
omp_target_memcpy(mem_dev_cpy, mem, sizeof(double)*s,
0, 0,
t, h);
#pragma omp target is_device_ptr(mem_dev_cpy) device(t)
#pragma omp teams distribute parallel for
/* NOTE(review): i is int but s is size_t — fine for this example's small
 * sizes, but would misbehave for s > INT_MAX; confirm if reused. */
for(i=0;i<s;i++){ mem_dev_cpy[i] = cos((double)i); } /* init data */
/* dst src */
omp_target_memcpy(mem, mem_dev_cpy, sizeof(double)*s,
0, 0,
h, t);
omp_target_free(mem_dev_cpy, t);
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
/* A near-zero sigma means no blurring: return the clone unchanged. */
if (fabs(sigma) < MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
The resulting edge_image acts as a per-pixel "edge strength" map that
later selects how wide a blur kernel each pixel receives.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
Only even indices of kernel[] are populated; kernel[i] is a normalized
Gaussian of side (width-i), so larger i means a smaller (weaker) blur.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
(size_t) (width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
/* Fold any normalization residue into the center tap so weights sum to 1. */
kernel[i][(k-1)/2]+=(double) (1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
/* A partially built kernel set means an allocation failed: unwind it. */
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const Quantum
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i;
ssize_t
center,
j;
/* Map edge intensity to a kernel index j: strong edges give large j
(small kernel, little blur); flat areas give small j (wide kernel).
j is forced even to match the even-indexed kernel[] entries. */
j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
/* Fetch the (width-j) x (width-j) neighborhood centered on (x,y). */
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
/* Offset (in Quantum units) of the neighborhood's center pixel. */
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
register const double
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
/* Copy-through channels and read-masked pixels are not blurred. */
if (((blur_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(image,p+center) == 0))
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
/* gamma re-normalizes for kernel taps clipped at image borders. */
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending: weight each tap by its pixel's alpha so transparent
neighbors do not bleed color into the result.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(blur_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) < MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, sharp, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
register const Quantum
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i;
ssize_t
center,
j;
j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
sharp_traits,
traits;
register const double
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
sharp_traits=GetPixelChannelTraits(sharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(sharp_traits == UndefinedPixelTrait))
continue;
if (((sharp_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(image,p+center) == 0))
{
SetPixelChannel(sharp_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((sharp_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(sharp_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *blur_kernel;

  /*
    Blur by convolving with a pair of 1-D Gaussian kernels (second one
    rotated 90 degrees), built from the requested radius and sigma.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available.
  */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  blur_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (blur_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,blur_kernel,exception);
  blur_kernel=DestroyKernelInfo(blur_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  /*
    Apply a custom convolution kernel to the image via the morphology
    machinery; use the OpenCL-accelerated path when it is available.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  {
    Image
      *convolve_image;

    convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
    if (convolve_image != (Image *) NULL)
      return(convolve_image);
  }
#endif
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull(): one sweep of the Crimmins complementary-hull speckle filter.
  f and g are (columns+2) x (rows+2) buffers holding one channel plane of
  the image with a one-pixel border.  (x_offset,y_offset) selects the
  neighbor direction; polarity > 0 raises pixels one quantum step when the
  neighbor is at least two steps brighter, otherwise the mirrored lowering
  rule applies.  Pass 1 writes the adjusted plane into g; pass 2 folds g
  (compared against both the neighbor r and its mirror s) back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row of each padded buffer. */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r addresses the neighbor in the (x_offset,y_offset) direction. */
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* Offset of row y's first interior pixel: y*(columns+2)+1. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* Raise one step when the neighbor is >= 2 steps brighter. */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* Lower one step when the neighbor is >= 2 steps darker. */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  p=f+(columns+2);
  q=g+(columns+2);
  /* Neighbor and mirrored-neighbor offsets into the adjusted plane q. */
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* Raise only if both the mirror is brighter and the neighbor
           strictly exceeds the current value. */
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* Hull neighbor directions (X[k],Y[k]): (0,1), (1,0), (1,1), (-1,1). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when available. */
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Each plane carries a one-pixel border so
    Hull() can address neighbors without bounds checks.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel plane at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the padded plane; j walks the padded buffer,
      stepping over the border column at both ends of every row.
    */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    /*
      Crimmins hulling: raise, then complementarily lower, against each
      direction and its opposite.
    */
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the despeckled plane back to the output image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress is reported per channel, not per row. */
        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Describe a width x width kernel centered on the middle tap.
    NOTE(review): zeroing the whole struct discards any state
    AcquireKernelInfo initialized; the signature is restored below.
  */
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Every tap is -1 except the center, which is width*height-1; the kernel
    therefore sums to zero and responds only to local intensity change
    (a Laplacian-style edge detector).  After the loop i == width*height,
    so values[i/2] addresses the center tap.
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with signed Gaussian weights: negative where u < 0 or
    v < 0, positive elsewhere.  Only the taps where u == k survive (k
    decreases by one per row, so the kept taps lie on the u == -v
    anti-diagonal); every other tap is zeroed.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the taps sum to one; PerceptibleReciprocal() guards
    against a vanishing sum.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Equalize the convolved result's histogram. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Blur by convolving with a 2-D Gaussian kernel built from the
    requested radius and sigma.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  double
    blue,
    green,
    red;

  /*
    Weighted RGB sum using the Rec. 709 luma coefficients.
  */
  red=pixel[image->channel_map[RedPixelChannel].offset];
  green=pixel[image->channel_map[GreenPixelChannel].offset];
  blue=pixel[image->channel_map[BluePixelChannel].offset];
  return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Each of the four sampling quadrants is width x width pixels. */
  width=(size_t) radius+1;
  /* All quadrant statistics are taken from a Gaussian-blurred copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,kuwahara_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width quadrants that meet at (x,y):
        upper-left, upper-right, lower-left, lower-right.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about its mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        /* Track the most homogeneous quadrant seen so far. */
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* A quadrant could not be read from the pixel cache. */
          status=MagickFalse;
          break;
        }
      /*
        The output pixel is interpolated from the center of the quadrant
        with the smallest luma variance.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when available. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The blur half-width is 0.2% of the largest dimension per unit of
    |radius|; scan lines are padded by width samples on both ends.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One private scan-line buffer per OpenMP thread. */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer: one luma value per pixel, each row padded
    by width samples on both sides.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Normalizer for the triangular weight ramp 1..width..1 used below. */
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur each column's luma into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* This thread's private scan-line buffer. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column plus width rows of virtual padding at each end. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Reduce the padded column to luma values. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular-weighted sum over the 2*width window around y. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur each row of interImage, then apply the
    unsharp-style contrast gain to the output pixels.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Copy the vertically blurred row (padding filled by the mirror
         writes above). */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        /* Triangular-weighted sum over the 2*width window around x. */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /* Boost the deviation of the source luma from the blurred luma by
           strength percent, then scale RGB by the resulting ratio. */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
          q);
        SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
          mult),q);
        SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult),
          q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetMotionBlurKernel() builds a 1-D Gaussian convolution kernel of the
  requested width, normalized so its taps sum to one.  The caller owns the
  returned aligned memory; NULL is returned on allocation failure.
  Note: the MagickSigma macro expands in terms of the `sigma` parameter.
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *coefficients,
    sum;

  register ssize_t
    j;

  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  coefficients=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    (size_t) width,sizeof(*coefficients)));
  if (coefficients == (MagickRealType *) NULL)
    return((MagickRealType *) NULL);
  /*
    Sample the Gaussian at integer offsets while accumulating total weight.
  */
  sum=0.0;
  for (j=0; j < (ssize_t) width; j++)
  {
    coefficients[j]=(MagickRealType) (exp((-((double) j*j)/(double)
      (2.0*MagickSigma*MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=coefficients[j];
  }
  /*
    Normalize so the kernel preserves overall intensity.
  */
  for (j=0; j < (ssize_t) width; j++)
    coefficients[j]/=sum;
  return(coefficients);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;   /* 1-D Gaussian taps, normalized to sum to 1 */

  OffsetInfo
    *offset;   /* per-tap pixel displacement along the motion vector */

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  /*
    Validate arguments and acquire the blur kernel and its offsets.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Convert the blur angle to a unit direction; each kernel tap i samples
    the pixel at offset[i] along that direction (x uses point.y and y uses
    point.x by design: the angle is measured off the x axis).
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image: one row per task, each channel convolved with the
    kernel along the motion direction.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* Copy-through channels and masked pixels pass unchanged. */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p) == 0))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Channel does not blend with alpha: plain weighted sum.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-blended channel: weight each sample by its alpha and
          renormalize by the accumulated alpha-weight (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters.  This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImage method is:
%
%      Image *PreviewImage(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Tile NumberTiles thumbnails of the image, each with the selected
    operation applied at a progressively stronger parameter; the center
    tile is the unprocessed original.  Returns a montage image, or NULL
    (with exception set) on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /*
          Center tile: keep the unprocessed thumbnail.
        */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->alpha_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is absent because the center tile (i == 4)
          is handled before the switch and never reaches here.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",radius,
          sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Fix: threshold the clone that is appended to the montage; the
          original code thresholded `thumbnail`, which is destroyed below,
          so the preview tile was never thresholded.
        */
        (void) BilevelImage(preview_image,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",(double)
          (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        /*
          Fix: label said "charcoal" (copy-paste from CharcoalDrawingPreview);
          this tile shows the -paint effect.
        */
        (void) FormatLocaleString(label,MagickPathExtent,"paint %gx%g",radius,
          sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        /*
          Round-trip the tile through a temporary JPEG file so the montage
          shows compression artifacts at this quality setting.
        */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            /*
              Fix: report the preview image's size, not the thumbnail's,
              consistent with the mb/kb branches above.
            */
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,   /* sampling tables along the blur arc */
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;   /* number of samples along the arc */

  ssize_t
    y;

  /*
    Allocate blur image.  Each destination pixel is the average of samples
    taken along a circular arc about the image center.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) ||
      (sin_theta == (double *) NULL))
    {
      /*
        Fix: release whichever table did get allocated (the original code
        leaked it on partial failure).  RelinquishMagickMemory is NULL-safe.
      */
      cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute rotation angles centered on the blur arc.
  */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /*
        Pixels nearer the center need fewer samples: stride through the
        tables proportionally to blur_radius/radius.
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p) == 0))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        /*
          Fix: GetPixelChannelTraits() takes a PixelChannel; the original
          passed the ChannelType enumerator AlphaChannel here, indexing the
          wrong channel-map slot.
        */
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /*
              No alpha blending: unweighted average of the arc samples.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted average of the arc samples.
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RotationalBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blurs pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;   /* grayscale copy used for the contrast test */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;   /* width x width 2-D Gaussian */

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,   /* offset of the central pixel within the p neighborhood */
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.  Only neighbors whose luminance
    differs from the center pixel by less than `threshold` contribute to
    the blur; everything else is preserved (edge-preserving blur).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Fill the 2-D Gaussian kernel (MagickSigma expands in terms of `sigma`).
  */
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel to the log, one row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a (columns+width) x width neighborhood band for this row from
      both the source and the grayscale luminance image.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* Copy-through channels and masked pixels pass unchanged. */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelReadMask(image,p+center) == 0))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended channel: accumulate only neighbors within the
              contrast threshold; gamma tracks the accepted weight.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              /* Advance both walkers to the next neighborhood row. */
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor qualified: keep the original pixel. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-blended channel: weight accepted neighbors by their alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SelectiveBlurImage)
#endif
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ShadeImage(): shines a distant light on an image to create a 3-D effect.
The light direction is given by azimuth/elevation (degrees); returns a new
shaded image or NULL on failure. Caller owns the returned image.
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag "Shade/Image"
CacheView
*image_view,
*shade_view;
Image
*linear_image,
*shade_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
light;
ssize_t
y;
/*
Initialize shaded image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* NOTE(review): despite the name, linear_image is a plain clone here —
no colorspace linearization is applied; confirm this is intended. */
linear_image=CloneImage(image,0,0,MagickTrue,exception);
shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (shade_image != (Image *) NULL)
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
/*
Compute the light vector.
*/
light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
/*
Shade image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(linear_image,exception);
shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
double
distance,
normal_distance,
shade;
PrimaryInfo
normal;
register const Quantum
*magick_restrict center,
*magick_restrict p,
*magick_restrict post,
*magick_restrict pre;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Fetch a 3-row window (rows y-1..y+1, columns -1..columns) so the
3x3 neighborhood of every pixel on row y is available. */
p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
exception);
q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Shade this row of pixels.
*/
normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
register ssize_t
i;
/*
Determine the surface normal and compute shading.
pre/center/post point at the pixel above, at, and below (x,y)
inside the padded 3-row window.
*/
pre=p+GetPixelChannels(linear_image);
center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
/* normal.x: intensity difference between the left and right columns
of the 3x3 neighborhood; normal.y: bottom row minus top row. */
normal.x=(double) (
GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
normal.y=(double) (
GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
GetPixelIntensity(linear_image,post)+
GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
GetPixelIntensity(linear_image,pre)-
GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
if ((fabs(normal.x) <= MagickEpsilon) &&
(fabs(normal.y) <= MagickEpsilon))
shade=light.z;  /* flat surface: lit by the vertical component only */
else
{
shade=0.0;
distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
if (distance > MagickEpsilon)
{
normal_distance=normal.x*normal.x+normal.y*normal.y+
normal.z*normal.z;
if (normal_distance > (MagickEpsilon*MagickEpsilon))
shade=distance/sqrt((double) normal_distance);
}
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel
channel;
PixelTrait
shade_traits,
traits;
channel=GetPixelChannelChannel(linear_image,i);
traits=GetPixelChannelTraits(linear_image,channel);
shade_traits=GetPixelChannelTraits(shade_image,channel);
if ((traits == UndefinedPixelTrait) ||
(shade_traits == UndefinedPixelTrait))
continue;
/* Copy-through cases: copy-trait channels, masked pixels, and
channels not flagged for update. */
if (((shade_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(linear_image,center) == 0))
{
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if ((traits & UpdatePixelTrait) == 0)
{
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if (gray != MagickFalse)
{
SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
continue;
}
SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
center[i]),q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(shade_image);
}
if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ShadeImage)
#endif
proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
shade_view=DestroyCacheView(shade_view);
image_view=DestroyCacheView(image_view);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
shade_image=DestroyImage(shade_image);
return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
SharpenImage(): sharpens the image by convolving with a negated 2-D
Gaussian whose center weight is adjusted so the kernel sums (after
normalization) to a sharpening filter. radius==0 selects a suitable
kernel width automatically. Returns a new image; caller owns it.
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*sharp_image;
KernelInfo
*kernel_info;
register ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->signature=MagickCoreSignature;
kernel_info->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel_info->width,kernel_info->height*
sizeof(*kernel_info->values)));
if (kernel_info->values == (MagickRealType *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/* Fill the kernel with a negated Gaussian and accumulate its sum. */
normalize=0.0;
j=(ssize_t) (kernel_info->width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel_info->values[i];
i++;
}
}
/* i == width*height (odd), so i/2 is the center tap; overwrite it so the
kernel's total weight flips sign (sum becomes -normalize). */
kernel_info->values[i/2]=(double) ((-2.0)*normalize);
/* Re-normalize so the kernel sums to 1 (PerceptibleReciprocal guards
against division by a near-zero sum). */
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
sharp_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
SpreadImage(): randomly displaces each pixel within a square neighborhood
of extent `radius`, sampling with the given interpolation method.
Returns a new image; caller owns it.
*/
MagickExport Image *SpreadImage(const Image *image,
const PixelInterpolateMethod method,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
size_t
width;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
{
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image.
*/
status=MagickTrue;
progress=0;
width=GetOptimalKernelWidth1D(radius,0.5);
/* One RandomInfo per thread so each row's RNG stream is independent. */
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* Only parallelize when the RNG is unseeded (key == ~0UL); a seeded RNG
must stay single-threaded to remain reproducible. */
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,spread_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PointInfo
point;
/* Random offsets in [0,1); recentered below to +/- width/2. */
point.x=GetPseudoRandomValue(random_info[id]);
point.y=GetPseudoRandomValue(random_info[id]);
status=InterpolatePixelChannels(image,image_view,spread_image,method,
(double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
exception);
q+=GetPixelChannels(spread_image);
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpreadImage)
#endif
proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
spread_image=DestroyImage(spread_image);
return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
UnsharpMaskImage(): sharpens by blurring the image, then adding back a
gain-scaled fraction of the (original - blurred) difference wherever that
difference exceeds the threshold. Returns a new image; caller owns it.
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
const double sigma,const double gain,const double threshold,
ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
double
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Try the OpenCL-accelerated path first; fall through on failure. */
unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
exception);
if (unsharp_image != (Image *) NULL)
return(unsharp_image);
#endif
/* unsharp_image starts as the blurred copy and is overwritten in place
with the sharpened result below. */
unsharp_image=BlurImage(image,radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
quantum_threshold=(double) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,unsharp_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits,
unsharp_traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(unsharp_traits == UndefinedPixelTrait))
continue;
if (((unsharp_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(image,p) == 0))
{
SetPixelChannel(unsharp_image,channel,p[i],q);
continue;
}
/* pixel = original - blurred; apply gain only when the difference
is at least half the threshold, otherwise keep the original. */
pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
if (fabs(2.0*pixel) < quantum_threshold)
pixel=(double) p[i];
else
pixel=(double) p[i]+gain*pixel;
SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(unsharp_image);
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UnsharpMaskImage)
#endif
proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
|
exact_cover_hybrid_bfs.c | /**
* Version Hybride : MPI + OpenMP avec BFS
*
* Quentin Deschamps, 2021
*/
#include <ctype.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <err.h>
#include <getopt.h>
#include <sys/time.h>
#include <mpi.h>
#include <omp.h>
/* Rank of the root (main) process */
#define ROOT 0
/* Minimum number of tasks */
#define MIN_ROOT 500
#define MIN_WORKER 50
/* Null item, used to call the item-choosing routine without a cover */
#define NULL_ITEM -1
double start = 0.0; // search start time; progress reports print (now - start)
char *in_filename = NULL; // name of the file containing the matrix
bool print_solutions = false; // print every solution found
long long report_delta = 1e6; // print a report every ... nodes
long long next_report; // next report printed at node ...
long long max_solutions = 0x7fffffffffffffff; // stop after ... solutions
/* Queue state (BFS frontier of contexts) */
struct context_t **queue; // queue of contexts
int queue_front = 0; // front of the queue
int queue_rear = -1; // rear of the queue
int queue_size = 0; // number of contexts in the queue
/* An exact-cover problem instance: a set of items and a list of options,
each option being a subset of the items. */
struct instance_t {
int n_items;
int n_primary; // items [0, n_primary) must be covered exactly once
int n_options;
char **item_name; // possibly NULL, otherwise of size n_items
int *options; // option i contains the items options[ptr[i]:ptr[i+1]]
int *ptr; // size n_options + 1
};
/* Sparse set over {0..capacity-1}: O(1) add, remove, and membership test. */
struct sparse_array_t {
int len; // number of stored elements
int capacity; // maximum size
int *p; // contents of the set = p[0:len]
int *q; // size capacity (just like p); q[x] = index of x in p
};
/* State of one node of the backtracking search. */
struct context_t {
struct sparse_array_t *active_items; // active items
struct sparse_array_t **active_options; // active options containing item i
int *chosen_options; // options chosen so far
int *child_num; // index of the child currently explored, per level
int *num_children; // number of children to explore, per level
int level; // number of chosen options
long long nodes; // number of nodes explored
long long solutions; // number of solutions found
};
/* Base-62 digits used by progress_report to render small counters. */
static const char DIGITS[62] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z'};
/* Return the current wall-clock time in seconds (microsecond resolution). */
double wtime()
{
	struct timeval now;
	gettimeofday(&now, NULL);
	return (double) now.tv_sec + now.tv_usec / 1e6;
}
/* Print the command-line help text and terminate the program. */
void usage(char **argv)
{
	static const char *help[] = {
		"Options:\n",
		"--progress-report N display a message every N nodes (0 to disable)\n",
		"--print-solutions display solutions when they are found\n",
		"--stop-after N stop the search once N solutions are found\n",
	};
	printf("%s --in FILENAME [OPTIONS]\n\n", argv[0]);
	for (size_t k = 0; k < sizeof(help) / sizeof(help[0]); k++)
		fputs(help[k], stdout);
	exit(0);
}
/* True iff `item` is a primary item (primary items occupy [0, n_primary)). */
bool item_is_primary(const struct instance_t *instance, int item)
{
	return (item < instance->n_primary) ? true : false;
}
/* Print the item names of `option` on one line; aborts when the instance
   carries no item names. */
void print_option(const struct instance_t *instance, int option)
{
	if (instance->item_name == NULL)
		errx(1, "tentative d'affichage sans noms d'objet");
	int begin = instance->ptr[option];
	int end = instance->ptr[option + 1];
	for (int k = begin; k < end; k++)
		printf("%s ", instance->item_name[instance->options[k]]);
	printf("\n");
}
/* Allocate an empty sparse set able to hold values in {0..n-1}.
   Aborts on allocation failure. Caller owns the returned set. */
struct sparse_array_t * sparse_array_init(int n)
{
	struct sparse_array_t *set = malloc(sizeof(*set));
	if (set == NULL)
		err(1, "impossible d'allouer un tableau creux");
	set->len = 0;
	set->capacity = n;
	set->p = malloc(n * sizeof(*set->p));
	set->q = malloc(n * sizeof(*set->q));
	if (set->p == NULL || set->q == NULL)
		err(1, "Impossible d'allouer p/q dans un tableau creux");
	/* q[x] == n marks "absent", so the set starts out empty. */
	for (int x = n; x-- > 0; )
		set->q[x] = n;
	return set;
}
/* True iff x is currently stored in S. */
bool sparse_array_membership(const struct sparse_array_t *S, int x)
{
	int slot = S->q[x];
	return slot < S->len;
}
/* True iff S holds no elements. */
bool sparse_array_empty(const struct sparse_array_t *S)
{
	return S->len == 0;
}
/* Append x to S (x must not already be a member). */
void sparse_array_add(struct sparse_array_t *S, int x)
{
	int pos = S->len++;
	S->p[pos] = x;
	S->q[x] = pos;
}
/* Remove x from S by swapping it with the last stored element; x is
   parked at p[len] so sparse_array_unremove() can undo the operation. */
void sparse_array_remove(struct sparse_array_t *S, int x)
{
	int last = S->len - 1;   /* index of the last live element */
	int slot = S->q[x];      /* current position of x */
	int moved = S->p[last];  /* element that takes x's place */
	S->p[last] = x;
	S->p[slot] = moved;
	S->q[x] = last;
	S->q[moved] = slot;
	S->len = last;
}
/* Undo the most recent remove: the removed element is still parked at
   p[len], so growing the set restores it. */
void sparse_array_unremove(struct sparse_array_t *S)
{
	S->len += 1;
}
/* Undo the most recent add by shrinking the set. */
void sparse_array_unadd(struct sparse_array_t *S)
{
	S->len -= 1;
}
bool item_is_active(const struct context_t *ctx, int item)
{
return sparse_array_membership(ctx->active_items, item);
}
/* Record one solution; when --print-solutions is set, also print the
   chosen options. */
void solution_found(const struct instance_t *instance, struct context_t *ctx)
{
	ctx->solutions += 1;
	if (print_solutions) {
		printf("Trouvé une nouvelle solution au niveau %d après %lld noeuds\n",
			ctx->level, ctx->nodes);
		printf("Options : \n");
		for (int k = 0; k < ctx->level; k++) {
			int option = ctx->chosen_options[k];
			printf("+ %d : ", option);
			print_option(instance, option);
		}
		printf("\n");
		printf("----------------------------------------------------\n");
	}
}
void cover(const struct instance_t *instance, struct context_t *ctx, int item);

/* Commit to `option`: record it, then cover every item it contains except
   `chosen_item` (which the caller has already covered). */
void choose_option(const struct instance_t *instance, struct context_t *ctx,
		int option, int chosen_item)
{
	ctx->chosen_options[ctx->level++] = option;
	int begin = instance->ptr[option];
	int end = instance->ptr[option + 1];
	for (int k = begin; k < end; k++) {
		int item = instance->options[k];
		if (item != chosen_item)
			cover(instance, ctx, item);
	}
}
void uncover(const struct instance_t *instance, struct context_t *ctx, int item);

/* Undo choose_option(): uncover the option's items in reverse order (so
   every sparse-array operation is undone LIFO), then pop the level. */
void unchoose_option(const struct instance_t *instance, struct context_t *ctx,
		int option, int chosen_item)
{
	int begin = instance->ptr[option];
	for (int k = instance->ptr[option + 1] - 1; k >= begin; k--) {
		int item = instance->options[k];
		if (item != chosen_item)
			uncover(instance, ctx, item);
	}
	ctx->level -= 1;
}
int choose_next_item(struct context_t *ctx)
{
int best_item = -1;
int best_options = 0x7fffffff;
struct sparse_array_t *active_items = ctx->active_items;
for (int i = 0; i < active_items->len; i++) {
int item = active_items->p[i];
struct sparse_array_t *active_options = ctx->active_options[item];
int k = active_options->len;
if (k < best_options) {
best_item = item;
best_options = k;
}
}
return best_item;
}
/* Print a one-line progress report: node/solution counts, elapsed time,
   and up to 45 "child/children" pairs rendered as base-62 digits, then
   schedule the next report. */
void progress_report(const struct context_t *ctx)
{
	double now = wtime();
	printf("Exploré %lld noeuds, trouvé %lld solutions, temps écoulé %.1fs. ",
		ctx->nodes, ctx->solutions, now - start);
	int i = 0;
	for (int k = 0; k < ctx->level; k++) {
		if (i > 44)
			break;
		int n = ctx->child_num[k];
		int m = ctx->num_children[k];
		if (m == 1)
			continue; /* forced moves carry no information */
		printf("%c%c ", (n < 62) ? DIGITS[n] : '*', (m < 62) ? DIGITS[m] : '*');
		i++;
	}
	printf("\n"); /* was `printf("\n"),` — comma-operator typo for `;` */
	next_report += report_delta;
}
void deactivate(const struct instance_t *instance, struct context_t *ctx,
		int option, int covered_item);

/* Cover `item`: drop it from the active items (primary items only) and
   deactivate every active option that contains it. deactivate() never
   touches this item's own option list, so iterating it here is safe. */
void cover(const struct instance_t *instance, struct context_t *ctx, int item)
{
	if (item_is_primary(instance, item))
		sparse_array_remove(ctx->active_items, item);
	struct sparse_array_t *opts = ctx->active_options[item];
	for (int k = 0; k < opts->len; k++)
		deactivate(instance, ctx, opts->p[k], item);
}
/* Remove `option` from the active-option list of every item it contains,
   except `covered_item` (whose list the caller is iterating). */
void deactivate(const struct instance_t *instance, struct context_t *ctx,
		int option, int covered_item)
{
	int begin = instance->ptr[option];
	int end = instance->ptr[option + 1];
	for (int k = begin; k < end; k++) {
		int item = instance->options[k];
		if (item != covered_item)
			sparse_array_remove(ctx->active_options[item], option);
	}
}
void reactivate(const struct instance_t *instance, struct context_t *ctx,
		int option, int uncovered_item);

/* Undo cover(): reactivate the item's options in reverse order, then
   restore the item itself (primary items only). */
void uncover(const struct instance_t *instance, struct context_t *ctx, int item)
{
	struct sparse_array_t *opts = ctx->active_options[item];
	for (int k = opts->len - 1; k >= 0; k--)
		reactivate(instance, ctx, opts->p[k], item);
	if (item_is_primary(instance, item))
		sparse_array_unremove(ctx->active_items);
}
/* Undo deactivate(): walk the option's items backwards and unremove the
   option from each item's list, skipping `uncovered_item`. */
void reactivate(const struct instance_t *instance, struct context_t *ctx,
		int option, int uncovered_item)
{
	for (int k = instance->ptr[option + 1] - 1; k >= instance->ptr[option]; k--) {
		int item = instance->options[k];
		if (item != uncovered_item)
			sparse_array_unremove(ctx->active_options[item]);
	}
}
/* Parse an exact-cover instance from `filename`. The file holds a header
   line "n_items n_options", then one line naming the items (an optional
   '|' separates primary from secondary items), then one line per option
   listing its item names. Aborts with err/errx on any I/O or format
   error. Caller owns the returned instance. */
struct instance_t *load_matrix(const char *filename)
{
struct instance_t *instance = malloc(sizeof(*instance));
if (instance == NULL)
err(1, "Impossible d'allouer l'instance");
FILE *in = fopen(filename, "r");
if (in == NULL)
err(1, "Impossible d'ouvrir %s en lecture", filename);
int n_it, n_op;
if (fscanf(in, "%d %d\n", &n_it, &n_op) != 2)
errx(1, "Erreur de lecture de la taille du problème\n");
if (n_it == 0 || n_op == 0)
errx(1, "Impossible d'avoir 0 objets ou 0 options");
instance->n_items = n_it;
instance->n_primary = 0;
instance->n_options = n_op;
instance->item_name = malloc(n_it * sizeof(char *));
instance->ptr = malloc((n_op + 1) * sizeof(int));
instance->options = malloc(n_it * n_op *sizeof(int)); // massive over-allocation
if (instance->item_name == NULL || instance->ptr == NULL || instance->options == NULL)
err(1, "Impossible d'allouer la mémoire pour stocker la matrice");
/* Character-level state machine, fed by 256-byte reads. */
enum state_t {START, ID, WHITESPACE, BAR, ENDLINE, ENDFILE};
enum state_t state = START;
char buffer[256];
int i = 0; // next available byte in the buffer
int n = 0; // last available byte in the buffer
char id[65];
id[64] = 0; // sentinel terminator, no matter what
int j = 0; // length of the identifier being read
int current_item = 0;
/* First pass: read the item-name line (ends at the first newline). */
while (state != ENDLINE) {
enum state_t prev_state = state;
if (i >= n) {
n = fread(buffer, 1, 256, in);
if (n == 0) {
if (feof(in)) {
state = ENDFILE;
}
if (ferror(in))
/* NOTE(review): reports the global in_filename, not the
   `filename` parameter — confirm they always match. */
err(1, "erreur lors de la lecture de %s", in_filename);
}
i = 0;
}
if (state == ENDFILE) {
// don't examine buffer[i]
} else if (buffer[i] == '\n') {
state = ENDLINE;
} else if (buffer[i] == '|') {
state = BAR;
} else if (isspace(buffer[i])) {
state = WHITESPACE;
} else {
state = ID;
}
// process the character read
if (state == ID) {
if (j == 64)
errx(1, "nom d'objet trop long : %s", id);
id[j] = buffer[i];
j++;
}
/* An identifier just ended: register the item name. */
if (prev_state == ID && state != ID) {
id[j] = '\0';
if (current_item == instance->n_items)
errx(1, "Objet excedentaire : %s", id);
for (int k = 0; k < current_item; k++)
if (strcmp(id, instance->item_name[k]) == 0)
errx(1, "Nom d'objets dupliqué : %s", id);
instance->item_name[current_item] = malloc(j+1);
strcpy(instance->item_name[current_item], id);
current_item++;
j = 0;
}
if (state == BAR)
instance->n_primary = current_item; // items before '|' are primary
if (state == ENDFILE)
errx(1, "Fin de fichier prématurée");
// advance to the next character
i++;
}
if (current_item != instance->n_items)
errx(1, "Incohérence : %d objets attendus mais seulement %d fournis\n",
instance->n_items, current_item);
if (instance->n_primary == 0)
instance->n_primary = instance->n_items; // no '|': all items primary
/* Second pass: read the options, one per non-empty line. */
int current_option = 0;
int p = 0; // current write pointer into instance->options
instance->ptr[0] = p;
bool has_primary = false;
while (state != ENDFILE) {
enum state_t prev_state = state;
if (i >= n) {
n = fread(buffer, 1, 256, in);
if (n == 0) {
if (feof(in)) {
state = ENDFILE;
}
if (ferror(in))
err(1, "erreur lors de la lecture de %s", in_filename);
}
i = 0;
}
if (state == ENDFILE) {
// don't examine buffer[i]
} else if (buffer[i] == '\n') {
state = ENDLINE;
} else if (buffer[i] == '|') {
state = BAR;
} else if (isspace(buffer[i])) {
state = WHITESPACE;
} else {
state = ID;
}
// process the character read
if (state == ID) {
if (j == 64)
errx(1, "nom d'objet trop long : %s", id);
id[j] = buffer[i];
j++;
}
if (prev_state == ID && state != ID) {
id[j] = '\0';
// identify the number of this item
int item_number = -1;
for (int k = 0; k < instance->n_items; k++)
if (strcmp(id, instance->item_name[k]) == 0) {
item_number = k;
break;
}
if (item_number == -1)
errx(1, "Objet %s inconnu dans l'option #%d", id, current_option);
// detect repeated items within the option
for (int k = instance->ptr[current_option]; k < p; k++)
if (item_number == instance->options[k])
errx(1, "Objet %s répété dans l'option %d\n",
instance->item_name[item_number], current_option);
instance->options[p] = item_number;
p++;
has_primary |= item_is_primary(instance, item_number);
j = 0;
}
if (state == BAR) {
errx(1, "Trouvé | dans une option.");
}
if ((state == ENDLINE || state == ENDFILE)) {
// skip empty lines
if (p > instance->ptr[current_option]) {
if (current_option == instance->n_options)
errx(1, "Option excédentaire");
if (!has_primary)
errx(1, "Option %d sans objet primaire\n", current_option);
current_option++;
instance->ptr[current_option] = p;
has_primary = false;
}
}
// advance to the next character
i++;
}
if (current_option != instance->n_options)
errx(1, "Incohérence : %d options attendues mais seulement %d fournies\n",
instance->n_options, current_option);
fclose(in);
fprintf(stderr, "Lu %d objets (%d principaux) et %d options\n",
instance->n_items, instance->n_primary, instance->n_options);
return instance;
}
/**
 * Broadcasts the instance from the root process to all other processes.
 * Every MPI_Bcast here must be matched, in order and size, by
 * recv_instance() on the non-root ranks.
 *
 * @param instance instance
 */
void send_instance(struct instance_t *instance)
{
MPI_Bcast(&instance->n_items, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
MPI_Bcast(&instance->n_primary, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
MPI_Bcast(&instance->n_options, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
/* NOTE(review): this broadcasts the raw char* pointer values of
   item_name, which are meaningless in other address spaces — workers
   must never dereference item_name (e.g. via print_option). To share
   the names, the string contents would have to be serialized. */
MPI_Bcast(instance->item_name, instance->n_items * sizeof(char*), MPI_CHAR, ROOT, MPI_COMM_WORLD);
MPI_Bcast(instance->options, instance->n_options * instance->n_items, MPI_INT, ROOT, MPI_COMM_WORLD);
MPI_Bcast(instance->ptr, instance->n_options + 1, MPI_INT, ROOT, MPI_COMM_WORLD);
}
/**
 * Receives the instance broadcast by the root process. Every MPI_Bcast
 * here mirrors one in send_instance(), in the same order and with the
 * same counts.
 *
 * @return newly allocated instance (caller owns it)
 */
struct instance_t *recv_instance()
{
	/* Allocate the instance (previously unchecked — malloc may fail). */
	struct instance_t *instance = malloc(sizeof(*instance));
	if (instance == NULL)
		err(1, "impossible d'allouer l'instance");
	/* Receive the scalar fields */
	MPI_Bcast(&instance->n_items, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
	MPI_Bcast(&instance->n_primary, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
	MPI_Bcast(&instance->n_options, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
	/* Allocate the arrays, checking each allocation */
	instance->item_name = malloc(instance->n_items * sizeof(char*));
	instance->options = malloc(instance->n_options * instance->n_items * sizeof(int));
	instance->ptr = malloc((instance->n_options + 1) * sizeof(int));
	if (instance->item_name == NULL || instance->options == NULL
			|| instance->ptr == NULL)
		err(1, "impossible d'allouer la mémoire pour recevoir l'instance");
	/* Receive the array contents.
	   NOTE(review): item_name receives raw pointer values from the root,
	   which are meaningless in this address space — confirm workers never
	   dereference item_name (e.g. never call print_option). */
	MPI_Bcast(instance->item_name, instance->n_items * sizeof(char*), MPI_CHAR, ROOT, MPI_COMM_WORLD);
	MPI_Bcast(instance->options, instance->n_options * instance->n_items, MPI_INT, ROOT, MPI_COMM_WORLD);
	MPI_Bcast(instance->ptr, instance->n_options + 1, MPI_INT, ROOT, MPI_COMM_WORLD);
	return instance;
}
/**
 * Allocates and initializes a fresh search context for the exact-cover
 * backtracking: all primary items start active, and every option starts
 * active for each item it contains.
 *
 * @param instance problem instance
 * @return newly allocated context (caller frees with free_ctx)
 */
struct context_t * backtracking_setup(const struct instance_t *instance)
{
	struct context_t *ctx = malloc(sizeof(*ctx));
	if (ctx == NULL)
		err(1, "impossible d'allouer un contexte");
	ctx->level = 0;
	ctx->nodes = 0;
	ctx->solutions = 0;
	int n = instance->n_items;
	int m = instance->n_options;
	/* per-level bookkeeping arrays, one slot per possible search depth */
	ctx->active_options = malloc(n * sizeof(*ctx->active_options));
	ctx->chosen_options = malloc(n * sizeof(*ctx->chosen_options));
	ctx->child_num = malloc(n * sizeof(*ctx->child_num));
	ctx->num_children = malloc(n * sizeof(*ctx->num_children));
	if (ctx->active_options == NULL || ctx->chosen_options == NULL
		|| ctx->child_num == NULL || ctx->num_children == NULL)
		err(1, "impossible d'allouer le contexte");
	/* only primary items have to be covered, so only they start active */
	ctx->active_items = sparse_array_init(n);
	for (int item = 0; item < instance->n_primary; item++)
		sparse_array_add(ctx->active_items, item);
	/* one sparse set of candidate options per item */
	for (int item = 0; item < n; item++)
		ctx->active_options[item] = sparse_array_init(m);
	for (int option = 0; option < m; option++)
		for (int k = instance->ptr[option]; k < instance->ptr[option + 1]; k++) {
			int item = instance->options[k];
			sparse_array_add(ctx->active_options[item], option);
		}
	return ctx;
}
/**
 * Returns a heap-allocated copy of an int array.
 *
 * @param a source array
 * @param n number of elements
 * @return malloc'd copy of a (caller frees); aborts on allocation failure
 */
int *array_copy(const int *a, int n)
{
	int *dst = malloc(n * sizeof(*dst));
	if (dst == NULL)
		err(1, "impossible d'allouer un tableau");
	for (int k = n - 1; k >= 0; k--)
		dst[k] = a[k];
	return dst;
}
/**
 * Deep-copies a sparse array (both index tables are duplicated).
 *
 * @param s sparse array to duplicate
 * @return malloc'd copy (caller frees with sparse_array_free)
 */
struct sparse_array_t *sparse_array_copy(const struct sparse_array_t *s)
{
	struct sparse_array_t *clone = malloc(sizeof(*clone));
	if (clone == NULL)
		err(1, "impossible d'allouer un tableau creux");
	clone->capacity = s->capacity;
	clone->len = s->len;
	/* both tables span the full capacity, not just len */
	clone->p = array_copy(s->p, s->capacity);
	clone->q = array_copy(s->q, s->capacity);
	return clone;
}
/**
 * Creates a deep copy of the given search context: scalar state, the
 * per-level bookkeeping arrays, active_items and every per-item
 * active_options set are duplicated, so the copy can be explored
 * independently of the original.
 *
 * Fix: the malloc of the active_options pointer array was the only
 * unchecked allocation in the file; it now aborts through err() like
 * the others.
 *
 * @param ctx context to copy
 * @param n number of items
 * @return malloc'd copy (caller frees with free_ctx)
 */
struct context_t * copy_ctx(const struct context_t *ctx, int n)
{
	struct context_t *ctx_copy = malloc(sizeof(*ctx_copy));
	if (ctx_copy == NULL)
		err(1, "impossible d'allouer un contexte");
	/* scalar state */
	ctx_copy->level = ctx->level;
	ctx_copy->nodes = ctx->nodes;
	ctx_copy->solutions = ctx->solutions;
	/* per-level bookkeeping arrays */
	ctx_copy->chosen_options = array_copy(ctx->chosen_options, n);
	ctx_copy->child_num = array_copy(ctx->child_num, n);
	ctx_copy->num_children = array_copy(ctx->num_children, n);
	/* sparse sets */
	ctx_copy->active_items = sparse_array_copy(ctx->active_items);
	ctx_copy->active_options = malloc(n * sizeof(*ctx_copy->active_options));
	if (ctx_copy->active_options == NULL)
		err(1, "impossible d'allouer un contexte");
	for (int item = 0; item < n; item++)
		ctx_copy->active_options[item] = sparse_array_copy(ctx->active_options[item]);
	return ctx_copy;
}
/**
 * Releases a sparse array and its two index tables.
 *
 * @param S sparse array to free (must not be NULL)
 */
void sparse_array_free(struct sparse_array_t *S)
{
	free(S->q);
	free(S->p);
	free(S);
}
/**
 * Releases a search context and everything it owns.
 *
 * @param ctx context to free
 * @param n number of items (length of active_options)
 */
void free_ctx(struct context_t *ctx, int n)
{
	for (int i = 0; i < n; i++)
		sparse_array_free(ctx->active_options[i]);
	sparse_array_free(ctx->active_items);
	free(ctx->num_children);
	free(ctx->child_num);
	free(ctx->chosen_options);
	free(ctx->active_options);
	free(ctx);
}
/**
 * Releases an instance and its arrays.
 *
 * The individual item_name strings are deliberately not freed: the
 * original code carried that loop commented out, which suggests the
 * names point into storage owned elsewhere — confirm ownership before
 * freeing them individually.
 *
 * Fix: tolerate a NULL instance, and drop the redundant NULL test
 * around free(item_name) (free(NULL) is a no-op).
 *
 * @param instance instance to free (NULL is a no-op)
 */
void free_instance(struct instance_t *instance)
{
	if (instance == NULL)
		return;
	free(instance->item_name);
	free(instance->options);
	free(instance->ptr);
	free(instance);
}
/**
 * Reports whether the global work queue currently holds no context.
 *
 * @return true when the queue is empty
 */
bool queue_is_empty()
{
	return !queue_size;
}
/**
 * Appends a context at the rear of the global work queue.
 *
 * @param ctx context to enqueue (the queue takes ownership)
 */
void enqueue(struct context_t *ctx)
{
	queue_size++;
	queue_rear++;
	queue[queue_rear] = ctx;
}
/**
 * Removes and returns the context at the front of the global work queue.
 *
 * @return dequeued context (the caller takes ownership)
 */
struct context_t *dequeue()
{
	struct context_t *front = queue[queue_front];
	queue_front++;
	queue_size--;
	return front;
}
/**
 * Frees every context still queued, then the queue storage itself.
 *
 * @param n number of items (needed to free each context)
 */
void free_queue(int n)
{
	while (!queue_is_empty())
		free_ctx(dequeue(), n);
	free(queue);
}
/**
 * Empties the queue, freeing every queued context, and rewinds the
 * front/rear indices so the storage can be reused for the next task.
 *
 * @param n number of items (needed to free each context)
 */
void reset_queue(int n)
{
	while (!queue_is_empty())
		free_ctx(dequeue(), n);
	queue_front = 0;
	queue_rear = -1;
	queue_size = 0;
}
/**
 * Recursive depth-first exact-cover search: pick an uncovered item, try
 * every option that still covers it, recurse, and backtrack. Solutions
 * are counted/reported through solution_found().
 *
 * Stops early once ctx->solutions reaches the global max_solutions; the
 * early return intentionally skips the unchoose/uncover cleanup since
 * the context is abandoned at that point.
 *
 * @param instance problem instance
 * @param ctx search context, mutated in place and restored on backtrack
 */
void solve(const struct instance_t *instance, struct context_t *ctx)
{
	ctx->nodes++;
	// if (ctx->nodes == next_report)
	// progress_report(ctx);
	if (sparse_array_empty(ctx->active_items)) {
		solution_found(instance, ctx);
		return; /* success: no active item left */
	}
	int chosen_item = choose_next_item(ctx);
	struct sparse_array_t *active_options = ctx->active_options[chosen_item];
	if (sparse_array_empty(active_options))
		return; /* failure: chosen_item cannot be covered */
	cover(instance, ctx, chosen_item);
	ctx->num_children[ctx->level] = active_options->len;
	for (int k = 0; k < active_options->len; k++) {
		int option = active_options->p[k];
		ctx->child_num[ctx->level] = k;
		choose_option(instance, ctx, option, chosen_item);
		solve(instance, ctx);
		if (ctx->solutions >= max_solutions)
			return; /* early exit: skips the cleanup below on purpose */
		unchoose_option(instance, ctx, option, chosen_item);
	}
	uncover(instance, ctx, chosen_item); /* backtrack */
}
/**
 * Runs a breadth-first expansion of the search tree on the ROOT process,
 * stopping at the first level holding more than MIN_ROOT nodes. On
 * return, the global queue contains the contexts to hand out to the
 * workers and *level is the depth reached, i.e. the number of chosen
 * options common to every queued context.
 *
 * Solutions found during the BFS itself never reach the queue, so they
 * are counted here and returned.
 *
 * Fixes: the queue malloc was unchecked, and the elapsed-time printf
 * used "%1.f" (zero decimals) instead of the "%.1f" used everywhere
 * else in this program.
 *
 * @param instance problem instance
 * @param level out: BFS stop level = length of each queued option list
 * @return number of solutions found during the BFS
 */
long long solve_bfs_root(const struct instance_t *instance, int *level)
{
	/* Wall-clock start of the BFS, for the final message */
	double t_start;
	/* Number of nodes at the current tree level */
	int count;
	/* Solutions found during the BFS */
	long long solutions = 0;
	/* Current tree level */
	*level = 0;
	/* Allocate the queue.
	 * NOTE(review): the n_options^2 bound is assumed large enough for
	 * one BFS level — confirm it cannot be exceeded on wide instances. */
	queue = malloc(instance->n_options * instance->n_options * sizeof(struct context_t*));
	if (queue == NULL)
		err(1, "impossible d'allouer la file");
	struct context_t *ctx = backtracking_setup(instance);
	enqueue(ctx);
	/* BFS traversal */
	printf("START BFS\n");
	t_start = wtime();
	while (!queue_is_empty())
	{
		count = queue_size;
		printf("- Level %d: %d nodes\n", *level, count);
		/* Stop once the level is wide enough to feed the workers */
		if (count > MIN_ROOT)
		{
			break;
		}
		while (count > 0)
		{
			ctx = dequeue();
			count--;
			if (sparse_array_empty(ctx->active_items))
			{
				solutions++;
				free_ctx(ctx, instance->n_items);
				continue; /* success: no active item left */
			}
			int chosen_item = choose_next_item(ctx);
			struct sparse_array_t *active_options = ctx->active_options[chosen_item];
			if (sparse_array_empty(active_options))
			{
				free_ctx(ctx, instance->n_items);
				continue; /* failure: chosen_item cannot be covered */
			}
			cover(instance, ctx, chosen_item);
			ctx->num_children[ctx->level] = active_options->len;
			/* ctx is only read below; each iteration expands its own
			 * deep copy, so only the shared queue needs protection */
			#pragma omp parallel for
			for (int k = 0; k < active_options->len; k++)
			{
				int option = active_options->p[k];
				/* Deep copy of the parent context */
				struct context_t *ctx_copy = copy_ctx(ctx, instance->n_items);
				/* Apply the option on the copy */
				ctx_copy->child_num[ctx_copy->level] = k;
				choose_option(instance, ctx_copy, option, chosen_item);
				/* The queue is shared: serialize insertions */
				#pragma omp critical
				enqueue(ctx_copy);
			}
			free_ctx(ctx, instance->n_items);
		}
		(*level)++;
	}
	printf("END BFS: %.1fs\n", wtime() - t_start);
	printf("Tasks: %d\n", queue_size);
	return solutions;
}
/**
 * Worker-side solver: expands the received context breadth-first until a
 * level holds more than MIN_WORKER nodes, then finishes every queued
 * subtree in parallel with the recursive solve().
 *
 * Solutions found during the BFS phase are counted directly; the queue
 * is drained and rewound (reset_queue) before returning, so the next
 * task starts from a clean queue.
 *
 * @param instance problem instance
 * @param ctx starting context (ownership passes to the queue machinery)
 * @return total number of solutions found for this subtree
 */
long long solve_bfs_worker(const struct instance_t *instance, struct context_t *ctx)
{
	/* Number of nodes at the current tree level */
	int count;
	/* Current tree level */
	int level = 0;
	/* Solutions found */
	long long solutions = 0;
	/* Seed the queue with the received context */
	enqueue(ctx);
	/* BFS traversal */
	while (!queue_is_empty())
	{
		count = queue_size;
		/* Stop once the level is wide enough to keep all threads busy */
		if (count > MIN_WORKER)
		{
			break;
		}
		while (count > 0)
		{
			ctx = dequeue();
			count--;
			if (sparse_array_empty(ctx->active_items))
			{
				solutions++;
				free_ctx(ctx, instance->n_items);
				continue; /* success: no active item left */
			}
			int chosen_item = choose_next_item(ctx);
			struct sparse_array_t *active_options = ctx->active_options[chosen_item];
			if (sparse_array_empty(active_options))
			{
				free_ctx(ctx, instance->n_items);
				continue; /* failure: chosen_item cannot be covered */
			}
			cover(instance, ctx, chosen_item);
			ctx->num_children[ctx->level] = active_options->len;
			/* each iteration expands its own deep copy of ctx */
			#pragma omp parallel for
			for (int k = 0; k < active_options->len; k++)
			{
				int option = active_options->p[k];
				/* Deep copy of the parent context */
				struct context_t *ctx_copy = copy_ctx(ctx, instance->n_items);
				/* Apply the option on the copy */
				ctx_copy->child_num[ctx_copy->level] = k;
				choose_option(instance, ctx_copy, option, chosen_item);
				/* The queue is shared: serialize insertions */
				#pragma omp critical
				enqueue(ctx_copy);
			}
			free_ctx(ctx, instance->n_items);
		}
		level++;
	}
	/* Finish every queued subtree in parallel with the DFS solver */
	#pragma omp parallel for reduction(+:solutions) schedule(dynamic)
	for (int i = queue_front; i <= queue_rear; i++)
	{
		queue[i]->solutions = 0;
		solve(instance, queue[i]);
		solutions += queue[i]->solutions;
	}
	/* Drain and rewind the queue for the next task */
	reset_queue(instance->n_items);
	return solutions;
}
/**
 * Entry point of the distributed exact-cover solver.
 *
 * The ROOT process loads and broadcasts the instance, expands the search
 * tree breadth-first into independent subtrees, then distributes them on
 * demand. Each worker repeatedly asks for work, solves the received
 * subtree (solve_bfs_worker) and reports its solution count.
 *
 * Fixes: getopt_long() returns an int — storing it in a char breaks the
 * comparison with -1 on platforms where char is unsigned; the worker's
 * two mallocs were unchecked.
 */
int main(int argc, char **argv)
{
	struct option longopts[5] = {
		{"in", required_argument, NULL, 'i'},
		{"progress-report", required_argument, NULL, 'v'},
		{"print-solutions", no_argument, NULL, 'p'},
		{"stop-after", required_argument, NULL, 's'},
		{NULL, 0, NULL, 0}
	};
	int ch;
	while ((ch = getopt_long(argc, argv, "", longopts, NULL)) != -1) {
		switch (ch) {
		case 'i':
			in_filename = optarg;
			break;
		case 'p':
			print_solutions = true;
			break;
		case 's':
			max_solutions = atoll(optarg);
			break;
		case 'v':
			report_delta = atoll(optarg);
			break;
		default:
			errx(1, "Unknown option\n");
		}
	}
	if (in_filename == NULL)
		usage(argv);
	next_report = report_delta;
	/* MPI bookkeeping */
	int size, rank;
	MPI_Status status;
	/* Message tags of the master/worker protocol */
	enum Tag{AVAILABLE, WORK_TODO, WORK_DONE, WORK, END};
	/* MPI initialization */
	MPI_Init(&argc, &argv);
	/* Number of processes */
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	/* Rank of this process */
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	/* Every process ends up with its own copy of the instance */
	struct instance_t *instance;
	if (rank == ROOT)
	{
		/* Read the instance from the input file */
		instance = load_matrix(in_filename);
		/* Broadcast it to the workers */
		send_instance(instance);
	}
	else
	{
		/* Receive the broadcast instance */
		instance = recv_instance();
	}
	/* Per-task search context */
	struct context_t *ctx;
	/* Main-loop continuation flag */
	bool run = true;
	/* Work-distribution state */
	int task; // next queue slot to hand out
	int stopped; // number of workers told to stop
	int level; // BFS stop level = number of options identifying a subtree
	int *chosen_options; // receive buffer for a subtree's option list
	long long work; // one worker's reported solution count
	long long solutions; // running total of solutions
	/* Start solve */
	printf("[DEBUG] P%d: START\n", rank);
	/* Master process */
	if (rank == ROOT)
	{
		start = wtime();
		/* Generate the subtrees to distribute (fills the global queue) */
		solutions = solve_bfs_root(instance, &level);
		/* Tell the workers how many options identify each subtree */
		MPI_Bcast(&level, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
		stopped = 0;
		task = queue_front;
		/* Serve workers until every task is done and every worker stopped */
		while (run)
		{
			/* Wait for any worker message */
			MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
			MPI_COMM_WORLD, &status);
			switch (status.MPI_TAG)
			{
			case AVAILABLE:
				/* Hand out the next subtree if one remains */
				if (task <= queue_rear)
				{
					/* Send the option list identifying the subtree */
					MPI_Send(queue[task]->chosen_options, level,
					MPI_INT, status.MPI_SOURCE,
					WORK_TODO, MPI_COMM_WORLD);
					task++;
					printf("%d/%d\n", task - queue_front, queue_size);
				}
				/* Otherwise tell this worker to stop */
				else
				{
					MPI_Send(NULL, 0, MPI_INT, status.MPI_SOURCE,
					END, MPI_COMM_WORLD);
					stopped++;
					run = stopped < size - 1;
				}
				break;
			case WORK_DONE:
				/* Collect the worker's solution count */
				MPI_Recv(&work, 1, MPI_LONG_LONG, status.MPI_SOURCE,
				WORK, MPI_COMM_WORLD, &status);
				solutions += work;
				break;
			default:
				fprintf(stderr, "Unknown message\n");
				break;
			}
		}
		/* Release the task queue */
		free_queue(instance->n_items);
		printf("FINI. Trouvé %lld solutions en %.1fs\n", solutions,
		wtime() - start);
	}
	/* Worker process */
	else
	{
		/* Local queue used by solve_bfs_worker */
		queue = malloc(instance->n_options * instance->n_options * sizeof(struct context_t*));
		if (queue == NULL)
			err(1, "impossible d'allouer la file");
		/* Length of the option list identifying a subtree */
		MPI_Bcast(&level, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
		/* Receive buffer for those option lists */
		chosen_options = malloc(level * sizeof(int));
		if (chosen_options == NULL)
			err(1, "impossible d'allouer le tableau d'options");
		while (run)
		{
			/* Ask the master for work */
			MPI_Send(NULL, 0, MPI_INT, ROOT, AVAILABLE, MPI_COMM_WORLD);
			/* Either a task (WORK_TODO) or a stop order (END) */
			MPI_Recv(chosen_options, level, MPI_INT, ROOT, MPI_ANY_TAG,
			MPI_COMM_WORLD, &status);
			switch (status.MPI_TAG)
			{
			case WORK_TODO:
				/* Rebuild the subtree's starting context... */
				ctx = backtracking_setup(instance);
				/* ...replay the received options... */
				for (int i = 0; i < level; i++)
					choose_option(instance, ctx, chosen_options[i], NULL_ITEM);
				/* ...and solve it */
				solutions = solve_bfs_worker(instance, ctx);
				/* Announce, then send the result */
				MPI_Send(NULL, 0, MPI_INT, ROOT, WORK_DONE, MPI_COMM_WORLD);
				MPI_Send(&solutions, 1, MPI_LONG_LONG, ROOT, WORK, MPI_COMM_WORLD);
				break;
			case END:
				/* No work left: clean up and leave the loop */
				run = false;
				free(queue);
				free(chosen_options);
				break;
			default:
				fprintf(stderr, "Unknown message\n");
				break;
			}
		}
	}
	/* Free instance */
	free_instance(instance);
	printf("[DEBUG] P%d: END\n", rank);
	/* MPI shutdown */
	MPI_Finalize();
	exit(EXIT_SUCCESS);
}
|
Grid.c | /* Grid.c */
/**********************************************************************************************************
Copyright (c) 2002-2013 Abdul-Rahman Allouche. All rights reserved
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the Gabedit), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
************************************************************************************************************/
#include "../../Config.h"
#ifdef ENABLE_OMP
#include <omp.h>
#endif
#include "../Utils/Constants.h"
#include "GlobalOrb.h"
#include "StatusOrb.h"
#include "UtilsOrb.h"
#include "ColorMap.h"
#include "../MultiGrid/PoissonMG.h"
#include "../Utils/UtilsInterface.h"
#include "../Utils/Utils.h"
#include "../Utils/Zlm.h"
#include "../Utils/MathFunctions.h"
#include "../Utils/GTF.h"
#include "../Utils/QL.h"
/************************************************************************/
static gdouble get_value_elf_becke(gdouble x,gdouble y,gdouble z,gint dump);
static gdouble get_value_elf_savin(gdouble x,gdouble y,gdouble z,gint dump);
static gdouble get_value_sas(gdouble x,gdouble y,gdouble z,gint dump);
static gdouble get_value_fed(gdouble x,gdouble y,gdouble z,gdouble alpha, gint n, gdouble eHOMO, gdouble eLUMO);
static gdouble get_energy_homo();
static gdouble get_energy_lumo();
/************************************************************************/
/**
 * Evaluates primitive n of contracted Slater-type basis function i
 * (SAOrb table) at point (x, y, z):
 * Coef * r^(pqn-1-l) * x^lx * y^ly * z^lz * exp(-Ex*r), with coordinates
 * taken relative to the primitive's center C. When the exponential
 * argument exceeds 40 a tiny constant is returned to avoid underflow.
 *
 * @param x,y,z evaluation point
 * @param i index of the contracted function in SAOrb
 * @param n index of the primitive within SAOrb[i]
 * @return value of the primitive at the point
 */
gdouble get_value_STF(gdouble x,gdouble y,gdouble z,gint i,gint n)
{
	gdouble v = 0.0;
	gdouble d = 0;
	gdouble de = 0;
	/* coordinates relative to the primitive's center */
	gdouble xi = x-SAOrb[i].Stf[n].C[0];
	gdouble yi = y-SAOrb[i].Stf[n].C[1];
	gdouble zi = z-SAOrb[i].Stf[n].C[2];
	/* total angular momentum l = lx + ly + lz */
	gint ll = SAOrb[i].Stf[n].l[0]+
	SAOrb[i].Stf[n].l[1]+
	SAOrb[i].Stf[n].l[2];
	d = (xi*xi)+(yi*yi)+(zi*zi);
	d = sqrt(d);
	de =d*SAOrb[i].Stf[n].Ex;
	if(de>40) return 1e-14; /* exp(-40) is negligible */
	v = SAOrb[i].Stf[n].Coef*pow(d,SAOrb[i].Stf[n].pqn-1-ll)*
	pow(xi,SAOrb[i].Stf[n].l[0])*
	pow(yi,SAOrb[i].Stf[n].l[1])*
	pow(zi,SAOrb[i].Stf[n].l[2])*
	exp(-de);
	return v;
}
/**************************************************************/
/**
 * Sums every Slater primitive of contracted basis function i at (x,y,z).
 */
gdouble get_value_CSTF(gdouble x,gdouble y,gdouble z,gint i)
{
	gdouble sum = 0.0;
	gint prim;
	for(prim = 0; prim < SAOrb[i].N; prim++)
		sum += get_value_STF(x, y, z, i, prim);
	return sum;
}
/************************************************************************/
/**
 * Evaluates primitive n of contracted Gaussian basis function i (AOrb
 * table) at point (x, y, z): Coef * x^lx * y^ly * z^lz * exp(-Ex*r^2),
 * with coordinates taken relative to the primitive's center C. When the
 * exponent argument exceeds 40 a tiny constant is returned to avoid
 * underflow.
 *
 * @param x,y,z evaluation point
 * @param i index of the contracted function in AOrb
 * @param n index of the primitive within AOrb[i]
 * @return value of the primitive at the point
 */
gdouble get_value_GTF(gdouble x,gdouble y,gdouble z,gint i,gint n)
{
	gdouble v = 0.0;
	gdouble d = 0;
	/* coordinates relative to the primitive's center */
	gdouble xi = x-AOrb[i].Gtf[n].C[0];
	gdouble yi = y-AOrb[i].Gtf[n].C[1];
	gdouble zi = z-AOrb[i].Gtf[n].C[2];
	d = (xi*xi)+(yi*yi)+(zi*zi);
	d *=AOrb[i].Gtf[n].Ex;
	if(d>40) return 1e-14; /* exp(-40) is negligible */
	v = AOrb[i].Gtf[n].Coef*
	pow(xi,AOrb[i].Gtf[n].l[0])*
	pow(yi,AOrb[i].Gtf[n].l[1])*
	pow(zi,AOrb[i].Gtf[n].l[2])*
	exp(-d);
	return v;
}
/**************************************************************/
/**
 * Sums every Gaussian primitive of contracted basis function i at (x,y,z).
 */
gdouble get_value_CGTF(gdouble x,gdouble y,gdouble z,gint i)
{
	gdouble sum = 0.0;
	gint prim;
	for(prim = 0; prim < AOrb[i].numberOfFunctions; prim++)
		sum += get_value_GTF(x, y, z, i, prim);
	return sum;
}
/**************************************************************/
/**
 * Evaluates contracted basis function i at (x,y,z), dispatching on the
 * basis type actually loaded: Gaussian (AOrb) takes precedence, then
 * Slater (SAOrb); returns 0 when no basis is loaded.
 */
gdouble get_value_CBTF(gdouble x,gdouble y,gdouble z,gint i)
{
	if(AOrb != NULL)
		return get_value_CGTF(x, y, z, i);
	if(SAOrb != NULL)
		return get_value_CSTF(x, y, z, i);
	return 0;
}
/**************************************************************/
/**
 * Evaluates molecular orbital k at (x,y,z) as a linear combination of
 * the basis functions, using the alpha coefficients when TypeSelOrb == 1
 * and the beta coefficients otherwise. Coefficients below 1e-10 in
 * magnitude are skipped to avoid useless basis-function evaluations.
 */
gdouble get_value_orbital(gdouble x,gdouble y,gdouble z,gint k)
{
	gdouble sum = 0.0;
	gint a;
	if(TypeSelOrb == 1)
	{
		for(a = 0; a < NAOrb; a++)
			if(fabs(CoefAlphaOrbitals[k][a]) > 1e-10)
				sum += CoefAlphaOrbitals[k][a]*get_value_CBTF(x,y,z,a);
	}
	else
	{
		for(a = 0; a < NAOrb; a++)
			if(fabs(CoefBetaOrbitals[k][a]) > 1e-10)
				sum += CoefBetaOrbitals[k][a]*get_value_CBTF(x,y,z,a);
	}
	return sum;
}
/**************************************************************/
/**
 * Electron density at (x,y,z) contributed by atom n alone, built from
 * the atom's own orbitals (GeomOrb[n]): rho = sum_k occ_k * phi_k^2 over
 * both alpha and beta orbitals.
 *
 * Fix: the beta occupation test used the global OccBetaOrbitals array
 * instead of the atom-local GeomOrb[n].OccBetaOrbitals used on the very
 * next line (the alpha branch already used the atom-local array).
 *
 * @param x,y,z evaluation point
 * @param n atom index
 * @return atomic electron density at the point
 */
gdouble get_value_electronic_density_on_atom(gdouble x,gdouble y,gdouble z,gint n)
{
	gdouble v1 = 0.0;
	gdouble v2 = 0.0;
	gdouble cgv = 0.0;
	gint i;
	gint k1;
	gint k2;
	gdouble *PhiAlpha = g_malloc(GeomOrb[n].NAlphaOrb*sizeof(gdouble));
	gdouble *PhiBeta = g_malloc(GeomOrb[n].NBetaOrb*sizeof(gdouble));
	for(k1=0;k1<GeomOrb[n].NAlphaOrb;k1++)
		PhiAlpha[k1] = 0.0;
	for(k2=0;k2<GeomOrb[n].NBetaOrb;k2++)
		PhiBeta[k2] = 0.0;
	/* accumulate each orbital's value; every basis function is
	 * evaluated once and shared between all orbitals */
	for(i=0;i<GeomOrb[n].NAOrb;i++)
	{
		cgv = get_value_CBTF(x,y,z,GeomOrb[n].NumOrb[i]);
		for(k1=0;k1<GeomOrb[n].NAlphaOrb;k1++)
			PhiAlpha[k1] += GeomOrb[n].CoefAlphaOrbitals[k1][i]*cgv;
		for(k2=0;k2<GeomOrb[n].NBetaOrb;k2++)
			PhiBeta[k2] += GeomOrb[n].CoefBetaOrbitals[k2][i]*cgv;
	}
	v1 = 0.0;
	for(k1=0;k1<GeomOrb[n].NAlphaOrb;k1++)
		if(GeomOrb[n].OccAlphaOrbitals[k1]>1e-8)
			v1 += GeomOrb[n].OccAlphaOrbitals[k1]*PhiAlpha[k1]*PhiAlpha[k1];
	v2 = 0.0;
	for(k2=0;k2<GeomOrb[n].NBetaOrb;k2++)
		if(GeomOrb[n].OccBetaOrbitals[k2]>1e-8)
			v2 += GeomOrb[n].OccBetaOrbitals[k2]*PhiBeta[k2]*PhiBeta[k2];
	g_free(PhiAlpha);
	g_free(PhiBeta);
	return v1+v2;
}
/**************************************************************/
/**
 * Total electron density at (x,y,z) as the sum of the independent
 * per-atom densities. The dump parameter is unused; it only matches the
 * Func3d callback signature.
 */
gdouble get_value_electronic_density_atomic(gdouble x,gdouble y,gdouble z,gint dump)
{
	gdouble total = 0.0;
	gint atom;
	for(atom = 0; atom < Ncenters; atom++)
		total += get_value_electronic_density_on_atom(x, y, z, atom);
	return total;
}
/**************************************************************/
/**
 * Total molecular electron density at (x,y,z):
 * rho = sum_k occ_k * phi_k(x,y,z)^2 over the occupied alpha and beta
 * molecular orbitals. Each phi_k is accumulated incrementally from the
 * basis-function values so every basis function is evaluated only once.
 *
 * The dump parameter is unused; it only matches the Func3d signature.
 */
gdouble get_value_electronic_density(gdouble x,gdouble y,gdouble z,gint dump)
{
	gdouble v1 = 0.0;
	gdouble v2 = 0.0;
	gdouble cgv = 0.0;
	gint i;
	gint k1;
	gint k2;
	gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
	gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
	for(k1=0;k1<NAlphaOrb;k1++)
		PhiAlpha[k1] = 0.0;
	for(k2=0;k2<NBetaOrb;k2++)
		PhiBeta[k2] = 0.0;
	/* accumulate the value of every occupied orbital */
	for(i=0;i<NAOrb;i++)
	{
		cgv = get_value_CBTF(x,y,z,i);
		for(k1=0;k1<NAlphaOrb;k1++)
			if(OccAlphaOrbitals[k1]>1e-8)
				PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
		for(k2=0;k2<NBetaOrb;k2++)
			if(OccBetaOrbitals[k2]>1e-8)
				PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
	}
	v1 = 0.0;
	for(k1=0;k1<NAlphaOrb;k1++)
		if(OccAlphaOrbitals[k1]>1e-8)
			v1 += OccAlphaOrbitals[k1]*PhiAlpha[k1]*PhiAlpha[k1];
	v2 = 0.0;
	for(k2=0;k2<NBetaOrb;k2++)
		if(OccBetaOrbitals[k2]>1e-8)
			v2 += OccBetaOrbitals[k2]*PhiBeta[k2]*PhiBeta[k2];
	g_free(PhiAlpha);
	g_free(PhiBeta);
	return v1+v2;
}
/**************************************************************/
/**
 * Density-difference value at (x,y,z): molecular electron density minus
 * the sum of the independent atomic densities.
 */
gdouble get_value_electronic_density_bonds(gdouble x,gdouble y,gdouble z,gint dump)
{
	gdouble molecular = get_value_electronic_density(x,y,z,dump);
	gdouble atomic = get_value_electronic_density_atomic(x,y,z,dump);
	return molecular - atomic;
}
/**************************************************************/
/**
 * Spin density at (x,y,z): alpha density minus beta density,
 * rho_s = sum_k occ_a_k*(phi_a_k)^2 - sum_k occ_b_k*(phi_b_k)^2.
 *
 * NOTE(review): the accumulation loops run only up to NAlphaOcc /
 * NBetaOcc while allocation and the squaring loops run up to NAlphaOrb /
 * NBetaOrb; orbitals beyond the *Occ counts keep their zero-initialized
 * value. Presumably those are unoccupied and filtered by the occupation
 * test anyway — confirm the *Occ counters cover every orbital with
 * occupation > 1e-8.
 *
 * The dump parameter is unused; it only matches the Func3d signature.
 */
gdouble get_value_spin_density(gdouble x,gdouble y,gdouble z,gint dump)
{
	gdouble v1 = 0.0;
	gdouble v2 = 0.0;
	gdouble cgv = 0.0;
	gint i;
	gint k1;
	gint k2;
	gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
	gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
	for(k1=0;k1<NAlphaOrb;k1++)
		PhiAlpha[k1] = 0.0;
	for(k2=0;k2<NBetaOrb;k2++)
		PhiBeta[k2] = 0.0;
	for(i=0;i<NAOrb;i++)
	{
		cgv = get_value_CBTF(x,y,z,i);
		for(k1=0;k1<NAlphaOcc;k1++)
			PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
		for(k2=0;k2<NBetaOcc;k2++)
			PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
	}
	v1 = 0.0;
	for(k1=0;k1<NAlphaOrb;k1++)
		if(OccAlphaOrbitals[k1]>1e-8)
			v1 += OccAlphaOrbitals[k1]*PhiAlpha[k1]*PhiAlpha[k1];
	v2 = 0.0;
	for(k2=0;k2<NBetaOrb;k2++)
		if(OccBetaOrbitals[k2]>1e-8)
			v2 += OccBetaOrbitals[k2]*PhiBeta[k2]*PhiBeta[k2];
	g_free(PhiAlpha);
	g_free(PhiBeta);
	return v1-v2;
}
/**************************************************************/
/**
 * Electronic contribution to the electrostatic potential at (x,y,z),
 * assembled from one-electron integrals over basis-function pairs.
 *
 * XkXl is caller-provided scratch of NAOrb*(NAOrb+1)/2 entries holding
 * the diagonal then lower-triangular pair integrals; off-diagonal
 * entries are skipped (set to 0) when a Schwarz-like screening on the
 * diagonal values says they are negligible.
 *
 * Fix: the beta-spin branch was missing its loop over the orbital index
 * k — it tested OccBetaOrbitals[k] with k left at NAlphaOrb by the
 * alpha loop, so at most one (wrong) beta orbital was ever accumulated.
 * The branch now mirrors the alpha loop.
 *
 * @param x,y,z evaluation point
 * @param XkXl scratch for pair integrals, size NAOrb*(NAOrb+1)/2
 * @return electronic electrostatic-potential contribution (0 if no AOrb)
 */
gdouble get_value_electrostatic_potential(gdouble x,gdouble y,gdouble z,gdouble* XkXl)
{
	gdouble v = 0.0;
	gint i;
	gint j;
	gint k;
	gint kl = 0;
	gdouble C[] = {x,y,z};
	gdouble schwarzCutOff = 1e-2;
	if(!AOrb) return 0;
	/* diagonal pair integrals first: they drive the screening below */
	for(i=0;i<NAOrb;i++) XkXl[kl++] = ionicPotentialCGTF(&AOrb[i], &AOrb[i], C, 1.0);
	for(i=0;i<NAOrb;i++)
	for(j=0;j<i;j++)
	{
		if( fabs(XkXl[i]* XkXl[j])>schwarzCutOff) XkXl[kl++] = ionicPotentialCGTF(&AOrb[i], &AOrb[j], C, 1.0);
		else XkXl[kl++] = 0;
	}
	if(kl!=NAOrb*(NAOrb+1)/2) printf("Erreur\n");
	v = 0;
	/* alpha-spin contribution */
	for(k=0;k<NAlphaOrb;k++)
	if(OccAlphaOrbitals[k]>1e-8)
	{
		kl = 0;
		for(i=0;i<NAOrb;i++)
			v += OccAlphaOrbitals[k]*CoefAlphaOrbitals[k][i]*CoefAlphaOrbitals[k][i]*XkXl[kl++];
		for(i=0;i<NAOrb;i++)
		for(j=0;j<i;j++)
			v += 2*OccAlphaOrbitals[k]*CoefAlphaOrbitals[k][i]*CoefAlphaOrbitals[k][j]*XkXl[kl++];
	}
	/* beta-spin contribution: the restricted case doubles the alpha sum */
	if(CoefBetaOrbitals==CoefAlphaOrbitals) v *= 2;
	else
	{
		for(k=0;k<NBetaOrb;k++)
		if(OccBetaOrbitals[k]>1e-8)
		{
			kl = 0;
			for(i=0;i<NAOrb;i++)
				v += OccBetaOrbitals[k]*CoefBetaOrbitals[k][i]*CoefBetaOrbitals[k][i]*XkXl[kl++];
			for(i=0;i<NAOrb;i++)
			for(j=0;j<i;j++)
				v += 2*OccBetaOrbitals[k]*CoefBetaOrbitals[k][i]*CoefBetaOrbitals[k][j]*XkXl[kl++];
			if(kl!=NAOrb*(NAOrb+1)/2) printf("Erreur\n");
		}
	}
	return v;
}
/*********************************************************************************/
/**
 * Returns TRUE when every grid value (C[3]) is either non-negative or
 * within PRECISION of zero; FALSE for a NULL grid.
 */
gboolean test_grid_all_positive(Grid* grid)
{
	const gdouble PRECISION = 1e-8;
	gint i, j, k;
	if(grid == NULL) return FALSE;
	for(i = 0; i < grid->N[0]; i++)
		for(j = 0; j < grid->N[1]; j++)
			for(k = 0; k < grid->N[2]; k++)
			{
				gdouble val = grid->point[i][j][k].C[3];
				if(val < 0 && fabs(val) > PRECISION) return FALSE;
			}
	return TRUE;
}
/**************************************************************/
/**
 * Recomputes the stored min/max of the grid values (C[3]) over every
 * grid point. When the user cancelled the computation, the limits are
 * simply set to the first point's value, as before.
 *
 * Fix: the parallel version updated grid->limits.MinMax directly from
 * all threads — a data race that could produce wrong limits. The scan
 * now uses OpenMP min/max reductions (OpenMP 3.1+) into locals and
 * writes the result once.
 */
void reset_limits_for_grid(Grid* grid)
{
	gint i,j,k;
	gdouble v;
	gdouble vMin, vMax;
	v = grid->point[0][0][0].C[3];
	vMin = v;
	vMax = v;
	if(!CancelCalcul)
	{
#ifdef ENABLE_OMP
#pragma omp parallel for private(v,j,k) reduction(min:vMin) reduction(max:vMax)
#endif
	for(i=0;i<grid->N[0];i++)
	for(j=0;j<grid->N[1];j++)
	for(k=0;k<grid->N[2];k++)
	{
		v = grid->point[i][j][k].C[3];
		if(vMin>v) vMin = v;
		if(vMax<v) vMax = v;
	}
	}
	grid->limits.MinMax[0][3] = vMin;
	grid->limits.MinMax[1][3] = vMax;
}
/**************************************************************/
/**
 * Allocates an N[0] x N[1] x N[2] grid of Point5 and stores the given
 * limits; the mapped flag starts FALSE. g_malloc aborts on failure, so
 * the result is never NULL.
 */
Grid* grid_point_alloc(gint N[],GridLimits limits)
{
	gint i, j;
	Grid* g = g_malloc(sizeof(Grid));
	g->N[0] = N[0];
	g->N[1] = N[1];
	g->N[2] = N[2];
	g->limits = limits;
	g->mapped = FALSE;
	g->point = g_malloc(g->N[0]*sizeof(Point5**));
	for(i = 0; i < g->N[0]; i++)
	{
		g->point[i] = g_malloc(g->N[1]*sizeof(Point5*));
		for(j = 0; j < g->N[1]; j++)
			g->point[i][j] = g_malloc(g->N[2]*sizeof(Point5));
	}
	return g;
}
/**************************************************************/
/**
 * Frees a grid and all of its point storage and returns NULL, so callers
 * can write grid = free_grid(grid).
 *
 * If the grid being freed is the globally displayed one, the color-map
 * handle box of the main window is hidden as well.
 *
 * NOTE(review): the g_object_get_data key "HandleboxColorMapGrid " ends
 * with a space; it must match the key used when the data was set —
 * confirm before "fixing" it.
 *
 * @param localGrid grid to free (NULL tolerated)
 * @return NULL
 */
Grid* free_grid(Grid* localGrid)
{
	gint i,j;
	/* remember whether this is the globally displayed grid */
	gboolean id = (localGrid==grid);
	if(!localGrid) return NULL;
	for(i=0;i< localGrid->N[0] ;i++)
	{
		for(j=0;j< localGrid->N[1] ;j++)
			g_free(localGrid->point[i][j]);
		g_free(localGrid->point[i]);
	}
	g_free(localGrid->point);
	g_free(localGrid);
	localGrid=NULL; /* local only; makes the return value NULL */
	if(id)
	{
		GtkWidget* handleBoxColorMapGrid = g_object_get_data(G_OBJECT(PrincipalWindow), "HandleboxColorMapGrid ");
		color_map_hide(handleBoxColorMapGrid);
	}
	return localGrid;
}
/**************************************************************/
/**
 * Deep-copies a grid: allocates a new grid with the same dimensions and
 * limits, then duplicates the four stored components of every point.
 */
Grid* copyGrid(Grid* grid)
{
	gint i, j, k, c;
	Grid *clone = grid_point_alloc(grid->N, grid->limits);
	for(i = 0; i < grid->N[0]; i++)
		for(j = 0; j < grid->N[1]; j++)
			for(k = 0; k < grid->N[2]; k++)
				for(c = 0; c < 4; c++)
					clone->point[i][j][k].C[c] = grid->point[i][j][k].C[c];
	return clone;
}
/**************************************************************/
/**
 * Dumps the grid to stdout: dimensions first, then "x y z value" for
 * every point in row-major order, then the stored value limits.
 *
 * Fix: removed a point counter that was incremented but never read.
 */
void print_grid_point(Grid* grid)
{
	gint i;
	gint j;
	gint k;
	printf("%d %d %d \n",grid->N[0],grid->N[1],grid->N[2]);
	for(i=0;i<grid->N[0];i++)
	{
		for(j=0;j<grid->N[1];j++)
		{
			for(k=0;k<grid->N[2];k++)
			{
				printf("%lf %lf %lf %lf \n",
				grid->point[i][j][k].C[0],
				grid->point[i][j][k].C[1],
				grid->point[i][j][k].C[2],
				grid->point[i][j][k].C[3]);
			}
		}
	}
	printf("Vlimits = %lf %lf \n", grid->limits.MinMax[0][3] , grid->limits.MinMax[1][3] );
}
/**************************************************************/
/**
 * Builds a grid of frontier-electron-density values (get_value_fed) over
 * the box described by limits, using the HOMO and LUMO energies.
 *
 * n is forwarded to get_value_fed and, per the callers in define_grid,
 * selects the variant: 0 electrophilic, 1 radical, 2 nucleophilic.
 * Returns NULL when the required orbital energies are unavailable
 * (values above 1e8 act as a "not available" sentinel here) or when the
 * user cancelled the computation.
 *
 * @param N number of points along each of the three grid axes
 * @param limits spatial bounds of the grid
 * @param n FED variant index passed to get_value_fed
 * @return newly allocated grid, or NULL
 */
Grid* define_grid_point_fed(gint N[],GridLimits limits,gint n)
{
	Grid* grid;
	gint i;
	gint j;
	gint k;
	gdouble x;
	gdouble y;
	gdouble z;
	gdouble v;
	gdouble scale;
	gdouble V0[3];
	gdouble V1[3];
	gdouble V2[3];
	gdouble firstPoint[3];
	gdouble eHOMO = get_energy_homo();
	gdouble eLUMO = get_energy_lumo();
	gdouble alpha = alphaFED*AUTOEV;
	/* gdouble alpha = alphaFED;*/
	if(eHOMO>1e8) return NULL;
	if(eLUMO>1e8 && n!=0) return NULL;
	grid = grid_point_alloc(N,limits);
	/* axis vectors scaled to the full extent of the box */
	for(i=0;i<3;i++)
	{
		V0[i] = firstDirection[i] *(grid->limits.MinMax[1][0]-grid->limits.MinMax[0][0]);
		V1[i] = secondDirection[i]*(grid->limits.MinMax[1][1]-grid->limits.MinMax[0][1]);
		V2[i] = thirdDirection[i] *(grid->limits.MinMax[1][2]-grid->limits.MinMax[0][2]);
	}
	for(i=0;i<3;i++)
	{
		firstPoint[i] = V0[i] + V1[i] + V2[i];
		/* firstPoint[i] = originOfCube[i] - firstPoint[i]/2;*/
		firstPoint[i] = limits.MinMax[0][i];
	}
	/* per-step increments along each axis */
	for(i=0;i<3;i++)
	{
		V0[i] /= grid->N[0]-1;
		V1[i] /= grid->N[1]-1;
		V2[i] /= grid->N[2]-1;
	}
	progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
	scale = (gdouble)1.01/grid->N[0];
	/* printf("Alpha = %f, n = %d eH = %f eL = %f\n",alpha,n,eHOMO, eLUMO);*/
#ifdef ENABLE_OMP
	printf("# proc = %d\n", omp_get_num_procs ());
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of grid, pleasse wait..."));
#endif
#pragma omp parallel for private(x,y,z,v,i,j,k)
#endif
	for(i=0;i<grid->N[0];i++)
	{
		if(!CancelCalcul)
		for(j=0;j<grid->N[1];j++)
		for(k=0;k<grid->N[2];k++)
		{
			x = firstPoint[0] + i*V0[0] + j*V1[0] + k*V2[0];
			y = firstPoint[1] + i*V0[1] + j*V1[1] + k*V2[1];
			z = firstPoint[2] + i*V0[2] + j*V1[2] + k*V2[2];
			v = get_value_fed( x, y, z, alpha, n, eHOMO, eLUMO);
			grid->point[i][j][k].C[0] = x;
			grid->point[i][j][k].C[1] = y;
			grid->point[i][j][k].C[2] = z;
			grid->point[i][j][k].C[3] = v;
		}
		/* progress bar update, serialized under OpenMP */
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
		progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
#else
		progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
	}
	if(CancelCalcul) progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
	/* recompute the value limits over the whole grid */
	v = grid->point[0][0][0].C[3];
	grid->limits.MinMax[0][3] = v;
	grid->limits.MinMax[1][3] = v;
	if(!CancelCalcul)
#ifdef ENABLE_OMP
#pragma omp parallel for private(v,i,j,k)
#endif
	for(i=0;i<grid->N[0];i++)
	for(j=0;j<grid->N[1];j++)
	for(k=0;k<grid->N[2];k++)
	{
		v = grid->point[i][j][k].C[3];
		if(grid->limits.MinMax[0][3]>v) grid->limits.MinMax[0][3] = v;
		if(grid->limits.MinMax[1][3]<v) grid->limits.MinMax[1][3] = v;
	}
	if(CancelCalcul)
	{
		grid = free_grid(grid);
	}
	return grid;
}
/**************************************************************/
/**
 * Builds a grid of values of an arbitrary 3D function over the box
 * described by limits: for each point, the position and func(x,y,z,
 * NumSelOrb) are stored, and the value limits are recomputed at the end.
 * Returns NULL (after freeing the partial grid) when the user cancelled
 * the computation.
 *
 * @param N number of points along each of the three grid axes
 * @param limits spatial bounds of the grid
 * @param func evaluation callback (gets the global NumSelOrb as 4th arg)
 * @return newly allocated grid, or NULL when cancelled
 */
Grid* define_grid_point(gint N[],GridLimits limits,Func3d func)
{
	Grid* grid;
	gint i;
	gint j;
	gint k;
	gdouble x;
	gdouble y;
	gdouble z;
	gdouble v;
	gdouble scale;
	gdouble V0[3];
	gdouble V1[3];
	gdouble V2[3];
	gdouble firstPoint[3];
	grid = grid_point_alloc(N,limits);
	/* axis vectors scaled to the full extent of the box */
	for(i=0;i<3;i++)
	{
		V0[i] = firstDirection[i] *(grid->limits.MinMax[1][0]-grid->limits.MinMax[0][0]);
		V1[i] = secondDirection[i]*(grid->limits.MinMax[1][1]-grid->limits.MinMax[0][1]);
		V2[i] = thirdDirection[i] *(grid->limits.MinMax[1][2]-grid->limits.MinMax[0][2]);
	}
	for(i=0;i<3;i++)
	{
		firstPoint[i] = V0[i] + V1[i] + V2[i];
		/*firstPoint[i] = originOfCube[i] - firstPoint[i]/2;*/
		firstPoint[i] = limits.MinMax[0][i];
	}
	/* per-step increments along each axis */
	for(i=0;i<3;i++)
	{
		V0[i] /= grid->N[0]-1;
		V1[i] /= grid->N[1]-1;
		V2[i] /= grid->N[2]-1;
	}
	progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
	scale = (gdouble)1.01/grid->N[0];
#ifdef ENABLE_OMP
	printf("# proc = %d\n", omp_get_num_procs ());
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of grid, pleasse wait..."));
#endif
#pragma omp parallel for private(x,y,z,v,i,j,k)
#endif
	for(i=0;i<grid->N[0];i++)
	{
		if(!CancelCalcul)
		for(j=0;j<grid->N[1];j++)
		{
			for(k=0;k<grid->N[2];k++)
			{
				x = firstPoint[0] + i*V0[0] + j*V1[0] + k*V2[0];
				y = firstPoint[1] + i*V0[1] + j*V1[1] + k*V2[1];
				z = firstPoint[2] + i*V0[2] + j*V1[2] + k*V2[2];
				v = func( x, y, z,NumSelOrb);
				grid->point[i][j][k].C[0] = x;
				grid->point[i][j][k].C[1] = y;
				grid->point[i][j][k].C[2] = z;
				grid->point[i][j][k].C[3] = v;
			}
		}
		/* progress bar update, serialized under OpenMP */
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
		{
			/* printf("progress_orb\n");*/
			progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
			/* printf("end progress_orb\n");*/
		}
#endif
#else
		progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
	}
	/* printf("end loop\n");*/
	if(CancelCalcul) progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
	/* recompute the value limits over the whole grid */
	v = grid->point[0][0][0].C[3];
	grid->limits.MinMax[0][3] = v;
	grid->limits.MinMax[1][3] = v;
	if(!CancelCalcul)
#ifdef ENABLE_OMP
#pragma omp parallel for private(v,i,j,k)
#endif
	for(i=0;i<grid->N[0];i++)
	for(j=0;j<grid->N[1];j++)
	for(k=0;k<grid->N[2];k++)
	{
		v = grid->point[i][j][k].C[3];
		if(grid->limits.MinMax[0][3]>v) grid->limits.MinMax[0][3] = v;
		if(grid->limits.MinMax[1][3]<v) grid->limits.MinMax[1][3] = v;
	}
	if(CancelCalcul)
	{
		grid = free_grid(grid);
	}
	return grid;
}
/**************************************************************/
/*
 * Dispatch grid construction according to the global TypeGrid.
 * Most grid types evaluate a point function via define_grid_point();
 * FED, MEP and Poisson-based grids use their dedicated builders.
 * Updates the "Grid" status label and returns the new grid (or NULL).
 */
Grid* define_grid(gint N[],GridLimits limits)
{
Grid* grid = NULL;
Func3d func = NULL;
set_status_label_info(_("Grid"),_("Computing"));
CancelCalcul = FALSE;
switch(TypeGrid)
{
/* Types backed by a simple point-function: record the callback only. */
case GABEDIT_TYPEGRID_ORBITAL : func = get_value_orbital; break;
case GABEDIT_TYPEGRID_EDENSITY : func = get_value_electronic_density; break;
case GABEDIT_TYPEGRID_DDENSITY : func = get_value_electronic_density_bonds; break;
case GABEDIT_TYPEGRID_ADENSITY : func = get_value_electronic_density_atomic; break;
case GABEDIT_TYPEGRID_SDENSITY : func = get_value_spin_density; break;
case GABEDIT_TYPEGRID_ELFBECKE : func = get_value_elf_becke; break;
case GABEDIT_TYPEGRID_ELFSAVIN : func = get_value_elf_savin; break;
case GABEDIT_TYPEGRID_SAS :
case GABEDIT_TYPEGRID_SASMAP : func = get_value_sas; break;
/* Types with dedicated builders: compute the grid directly. */
case GABEDIT_TYPEGRID_FEDELECTROPHILIC : grid = define_grid_point_fed( N, limits,0); break;
case GABEDIT_TYPEGRID_FEDRADICAL : grid = define_grid_point_fed( N, limits,1); break;
case GABEDIT_TYPEGRID_FEDNUCLEOPHILIC : grid = define_grid_point_fed( N, limits,2); break;
case GABEDIT_TYPEGRID_MEP_CHARGES : grid = compute_mep_grid_using_partial_charges(N, limits); break;
case GABEDIT_TYPEGRID_MEP_MULTIPOL : grid = compute_mep_grid_using_multipol_from_orbitals(N, limits, get_multipole_rank()); break;
case GABEDIT_TYPEGRID_MEP_CG : grid = solve_poisson_equation_from_orbitals(N,limits, GABEDIT_CG); break;
case GABEDIT_TYPEGRID_MEP_MG : grid = solve_poisson_equation_from_orbitals(N,limits, GABEDIT_MG); break;
case GABEDIT_TYPEGRID_MEP_EXACT : grid = compute_mep_grid_exact(N,limits); break;
case GABEDIT_TYPEGRID_NCI : break; /* handled elsewhere; no grid built here */
}
if(func) grid = define_grid_point(N,limits,func);
if(grid) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return grid;
}
/*********************************************************************************/
/*
 * Recompute a FED grid (reaction type n: 0=electrophilic, 1=radical,
 * 2=nucleophilic, per define_grid_FED below) reusing the geometry
 * (dimensions and limits) of an existing cube grid. NULL-safe.
 */
Grid* compute_fed_grid_using_cube_grid(Grid* grid, gint n)
{
if(grid == NULL) return NULL;
return define_grid_point_fed(grid->N, grid->limits, n);
}
/**************************************************************/
/*
 * Build an electronic-density grid regardless of the current TypeGrid:
 * the global is temporarily forced to EDENSITY and restored afterwards.
 */
Grid* define_grid_electronic_density(gint N[],GridLimits limits)
{
GabEditTypeGrid savedType = TypeGrid;
Grid* result = NULL;
gchar* msg = g_strdup_printf(_("Computing Grid for electronic density"));
set_status_label_info(_("Grid"),msg);
g_free(msg);
CancelCalcul = FALSE;
TypeGrid = GABEDIT_TYPEGRID_EDENSITY;
result = define_grid_point(N,limits,get_value_electronic_density);
TypeGrid = savedType;
if(result != NULL) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return result;
}
/**************************************************************/
/*
 * Build a Fukui/FED grid for reaction type n:
 *   0 = electrophilic, 2 = nucleophilic, anything else = radical.
 * TypeGrid is temporarily switched to the matching FED type and restored.
 */
Grid* define_grid_FED(gint N[],GridLimits limits, gint n)
{
GabEditTypeGrid savedType = TypeGrid;
Grid* result = NULL;
gchar* msg = NULL;
switch(n)
{
case 0:
msg = g_strdup_printf(_("Computing FED Grid for a electrophilic reaction"));
break;
case 2:
msg = g_strdup_printf(_("Computing FED Grid for a nucleophilic reaction"));
break;
default:
msg = g_strdup_printf(_("Computing FED Grid for a radical reaction"));
}
set_status_label_info(_("Grid"),msg);
g_free(msg);
CancelCalcul = FALSE;
switch(n)
{
case 0: TypeGrid = GABEDIT_TYPEGRID_FEDELECTROPHILIC; break;
case 2: TypeGrid = GABEDIT_TYPEGRID_FEDNUCLEOPHILIC; break;
default: TypeGrid = GABEDIT_TYPEGRID_FEDRADICAL;
}
result = define_grid_point_fed(N,limits,n);
TypeGrid = savedType;
if(result != NULL) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return result;
}
/**************************************************************/
/*
 * Build an ELF (Becke) grid; forces TypeGrid to ELFBECKE for the
 * duration of the computation, then restores it.
 */
Grid* define_grid_ELFBECKE(gint N[],GridLimits limits)
{
GabEditTypeGrid savedType = TypeGrid;
Grid* result = NULL;
gchar* msg = g_strdup_printf(_("Computing Grid for ELF(Becke)"));
set_status_label_info(_("Grid"),msg);
g_free(msg);
CancelCalcul = FALSE;
TypeGrid = GABEDIT_TYPEGRID_ELFBECKE;
result = define_grid_point(N,limits,get_value_elf_becke);
TypeGrid = savedType;
if(result != NULL) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return result;
}
/**************************************************************/
/*
 * Build an ELF (Savin) grid; forces TypeGrid to ELFSAVIN for the
 * duration of the computation, then restores it.
 */
Grid* define_grid_ELFSAVIN(gint N[],GridLimits limits)
{
GabEditTypeGrid savedType = TypeGrid;
Grid* result = NULL;
gchar* msg = g_strdup_printf(_("Computing Grid for ELF(Savin)"));
set_status_label_info(_("Grid"),msg);
g_free(msg);
CancelCalcul = FALSE;
TypeGrid = GABEDIT_TYPEGRID_ELFSAVIN;
result = define_grid_point(N,limits,get_value_elf_savin);
TypeGrid = savedType;
if(result != NULL) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return result;
}
/**************************************************************/
/*
 * Build a grid of orbital #i values (typeOrb selects alpha/beta set via
 * the global TypeSelOrb). All three globals touched (TypeGrid, TypeSelOrb,
 * NumSelOrb) are saved and restored around the computation.
 */
Grid* define_grid_orb(gint N[],GridLimits limits, gint typeOrb, gint i)
{
GabEditTypeGrid savedType = TypeGrid;
gint savedTypeSelOrb = TypeSelOrb;
gint savedNumSelOrb = NumSelOrb;
Grid* result = NULL;
gchar* msg = g_strdup_printf(_("Computing Grid for orb # %d"),i);
set_status_label_info(_("Grid"),msg);
g_free(msg);
CancelCalcul = FALSE;
TypeGrid = GABEDIT_TYPEGRID_ORBITAL;
TypeSelOrb = typeOrb;
NumSelOrb = i;
result = define_grid_point(N,limits,get_value_orbital);
TypeGrid = savedType;
TypeSelOrb = savedTypeSelOrb;
NumSelOrb = savedNumSelOrb;
if(result != NULL) set_status_label_info(_("Grid"),_("Ok"));
else set_status_label_info(_("Grid"),_("Nothing"));
return result;
}
/**************************************************************/
/*
 * Numerically evaluate the Coulomb integral (ii|jj) between the densities of
 * orbitals i and j on a grid, together with the grid norms <i|i>, <j|j> and
 * the overlap <i|j>. Results are returned through the four output pointers
 * (initialized to -1 and only set on success). Returns FALSE on cancellation
 * or on grid-allocation failure.
 *
 * Fix: the original leaked gridi when the second grid could not be built.
 */
gboolean compute_coulomb_integrale_iijj(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap)
{
Grid *gridi = NULL;
Grid *gridj = NULL;
gint ki,li,mi;
gint kj,lj,mj;
gdouble scale;
gdouble normi = 0;
gdouble normj = 0;
gdouble overlap = 0;
gdouble r12 = 0;
gdouble xx,yy,zz;
gdouble integ = 0;
gdouble dv = 0;
gdouble PRECISION = 1e-10;
*pInteg = -1;
*pNormi = -1;
*pNormj = -1;
*pOverlap = -1;
gridi = define_grid_orb(N, limits, typeOrbi, i);
if(!gridi) return FALSE;
gridj = define_grid_orb(N, limits, typeOrbj, j);
if(!gridj)
{
/* Bug fix: free the first grid before bailing out (was leaked). */
free_grid(gridi);
return FALSE;
}
/* Pass 1: accumulate <phi_i|phi_j> and square both grids in place
 * (C[3] becomes phi^2, i.e. the orbital density at each node). */
set_status_label_info(_("Grid"),_("Comp. phi_i^2 and phi_j^2"));
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
#ifdef ENABLE_OMP
printf("# proc = %d\n", omp_get_num_procs ());
#ifdef G_OS_WIN32
setTextInProgress(_("Computing of phi_i and phi_j, pleasse wait..."));
#endif
#pragma omp parallel for private(ki,li,mi) reduction(+:overlap)
#endif
for(ki=0;ki<gridi->N[0];ki++)
{
if(!CancelCalcul)
for(li=0;li<gridi->N[1];li++)
{
for(mi=0;mi<gridi->N[2];mi++)
{
overlap += gridi->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
gridi->point[ki][li][mi].C[3] = gridi->point[ki][li][mi].C[3]* gridi->point[ki][li][mi].C[3];
gridj->point[ki][li][mi].C[3] = gridj->point[ki][li][mi].C[3]* gridj->point[ki][li][mi].C[3];
}
}
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
#else
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
}
/* Pass 2: norm of orbital i (sum of its density over the grid). */
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
set_status_label_info(_("Grid"),_("Comp. <phi_i|phi_i>"));
scale = (gdouble)1.01/gridi->N[0];
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
setTextInProgress(_("Computing of <phi_i|phi_i>, please wait..."));
#endif
#pragma omp parallel for private(ki,li,mi) reduction(+:normi)
#endif
for(ki=0;ki<gridi->N[0];ki++)
{
if(!CancelCalcul)
for(li=0;li<gridi->N[1];li++)
for(mi=0;mi<gridi->N[2];mi++)
normi += gridi->point[ki][li][mi].C[3];
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
#else
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
}
/* Pass 3: norm of orbital j. */
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
set_status_label_info(_("Grid"),_("Comp. <phi_j|phi_j>"));
scale = (gdouble)1.01/gridj->N[0];
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
setTextInProgress(_("Computing of <phi_j|phi_j>, please wait..."));
#endif
#pragma omp parallel for private(ki,li,mi) reduction(+:normj)
#endif
for(ki=0;ki<gridj->N[0];ki++)
{
if(!CancelCalcul)
for(li=0;li<gridj->N[1];li++)
for(mi=0;mi<gridj->N[2];mi++)
normj += gridj->point[ki][li][mi].C[3];
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
#else
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul)
{
free_grid(gridi);
free_grid(gridj);
return FALSE;
}
/* Pass 4: double sum rho_i(r1) rho_j(r2)/|r1-r2| over all node pairs
 * (O(N^6); nodes closer than PRECISION are skipped to avoid 1/0). */
set_status_label_info(_("Grid"),_("Computing of Coulomb int."));
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
setTextInProgress(_("Computing of Coulomb integral, please wait..."));
#endif
#pragma omp parallel for private(xx,yy,zz,r12,ki,li,mi,kj,lj,mj) reduction(+:integ)
#endif
for(ki=0;ki<gridi->N[0];ki++)
{
if(!CancelCalcul)
for(li=0;li<gridi->N[1];li++)
for(mi=0;mi<gridi->N[2];mi++)
for(kj=0;kj<gridj->N[0];kj++)
for(lj=0;lj<gridj->N[1];lj++)
for(mj=0;mj<gridj->N[2];mj++)
{
xx = gridi->point[ki][li][mi].C[0]-gridj->point[kj][lj][mj].C[0];
yy = gridi->point[ki][li][mi].C[1]-gridj->point[kj][lj][mj].C[1];
zz = gridi->point[ki][li][mi].C[2]-gridj->point[kj][lj][mj].C[2];
r12 = xx*xx+yy*yy+zz*zz;
if(r12>PRECISION)
integ += gridi->point[ki][li][mi].C[3]*gridj->point[kj][lj][mj].C[3]/sqrt(r12);
}
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
#else
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
#endif
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* Volume element from the spacing between adjacent nodes. */
xx = gridi->point[1][0][0].C[0]-gridi->point[0][0][0].C[0];
yy = gridi->point[0][1][0].C[1]-gridi->point[0][0][0].C[1];
zz = gridi->point[0][0][1].C[2]-gridi->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
free_grid(gridi);
free_grid(gridj);
if(CancelCalcul) return FALSE;
*pInteg = integ*dv*dv;
*pNormi = normi*dv;
*pNormj = normj*dv;
*pOverlap = overlap*dv;
return TRUE;
}
/*********************************************************************************/
/*
 * Fill the nBoundary-thick shell of the grid by copying the value (C[3])
 * from the nearest interior plane, one face at a time. Used after finite
 * difference passes that only compute interior nodes.
 */
void reset_boundary(Grid* grid, gint nBoundary)
{
gint ix;
gint iy;
gint iz;
/* x = low face */
for(ix=0;ix<nBoundary;ix++)
for(iy=0;iy<grid->N[1];iy++)
for(iz=0;iz<grid->N[2];iz++)
grid->point[ix][iy][iz].C[3] = grid->point[nBoundary][iy][iz].C[3];
/* x = high face */
for(ix=grid->N[0]-nBoundary;ix<grid->N[0];ix++)
for(iy=0;iy<grid->N[1];iy++)
for(iz=0;iz<grid->N[2];iz++)
grid->point[ix][iy][iz].C[3] = grid->point[grid->N[0]-nBoundary-1][iy][iz].C[3];
/* y = low face */
for(iy=0;iy<nBoundary;iy++)
for(ix=0;ix<grid->N[0];ix++)
for(iz=0;iz<grid->N[2];iz++)
grid->point[ix][iy][iz].C[3] = grid->point[ix][nBoundary][iz].C[3];
/* y = high face */
for(iy=grid->N[1]-nBoundary;iy<grid->N[1];iy++)
for(ix=0;ix<grid->N[0];ix++)
for(iz=0;iz<grid->N[2];iz++)
grid->point[ix][iy][iz].C[3] = grid->point[ix][grid->N[1]-nBoundary-1][iz].C[3];
/* z = low face */
for(iz=0;iz<nBoundary;iz++)
for(iy=0;iy<grid->N[1];iy++)
for(ix=0;ix<grid->N[0];ix++)
grid->point[ix][iy][iz].C[3] = grid->point[ix][iy][nBoundary].C[3];
/* z = high face */
for(iz=grid->N[2]-nBoundary;iz<grid->N[2];iz++)
for(iy=0;iy<grid->N[1];iy++)
for(ix=0;ix<grid->N[0];ix++)
grid->point[ix][iy][iz].C[3] = grid->point[ix][iy][grid->N[2]-nBoundary-1].C[3];
}
/*******************************************************************************************/
/*
 * Compute the Laplacian of grid->C[3] with a (2*nBoundary+1)-point central
 * finite-difference stencil whose coefficients come from getCoefsLaplacian().
 * Returns a new grid of the same geometry; interior nodes hold the Laplacian,
 * boundary nodes are filled by reset_boundary(). Returns NULL on bad input
 * (NULL grid, nBoundary<1, or a grid too small for the stencil) or when the
 * user cancels (via the global CancelCalcul).
 * Caller owns the returned grid.
 */
Grid* get_grid_laplacian(Grid* grid, gint nBoundary)
{
gint i;
gint j;
gint k;
gdouble v;
Grid* lapGrid = NULL;
gdouble xh, yh, zh;
gdouble a, b, c;
gint N[3] = {0,0,0};
gdouble* fcx = NULL;
gdouble* fcy = NULL;
gdouble* fcz = NULL;
gdouble cc = 0;
GridLimits limits;
gdouble scale = 0;
gint n;
gboolean beg = TRUE;
if(grid==NULL) return NULL;
if(nBoundary<1) return NULL;
/* The stencil needs nBoundary nodes on each side of every interior node. */
if(grid->N[0]<=2*nBoundary) return NULL;
if(grid->N[1]<=2*nBoundary) return NULL;
if(grid->N[2]<=2*nBoundary) return NULL;
for(n=0;n<3;n++) N[n] = grid->N[n];
/* Grid step along each direction = distance between adjacent nodes. */
i = 1; j = 0; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
xh = sqrt(a*a+b*b+c*c);
i = 0; j = 1; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
yh = sqrt(a*a+b*b+c*c);
i = 0; j = 0; k = 1;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
zh = sqrt(a*a+b*b+c*c);
/* fc*[1..nBoundary] are the off-center stencil weights; cc is the center weight. */
fcx = g_malloc((nBoundary+1)*sizeof(gdouble));
fcy = g_malloc((nBoundary+1)*sizeof(gdouble));
fcz = g_malloc((nBoundary+1)*sizeof(gdouble));
getCoefsLaplacian(nBoundary, xh, yh, zh, fcx, fcy, fcz, &cc);
limits.MinMax[0][0] = grid->limits.MinMax[0][0];
limits.MinMax[1][0] = grid->limits.MinMax[1][0];
limits.MinMax[0][1] = grid->limits.MinMax[0][1];
limits.MinMax[1][1] = grid->limits.MinMax[1][1];
limits.MinMax[0][2] = grid->limits.MinMax[0][2];
limits.MinMax[1][2] = grid->limits.MinMax[1][2];
lapGrid = grid_point_alloc(N,limits);
progress_orb(0,GABEDIT_PROGORB_COMPLAPGRID,TRUE);
scale = (gdouble)1.01/lapGrid->N[0];
/* Copy coordinates; zero values so the boundary shell starts defined. */
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
lapGrid->point[i][j][k].C[0] = grid->point[i][j][k].C[0];
lapGrid->point[i][j][k].C[1] = grid->point[i][j][k].C[1];
lapGrid->point[i][j][k].C[2] = grid->point[i][j][k].C[2];
lapGrid->point[i][j][k].C[3] = 0;
}
}
}
/* Apply the stencil on interior nodes, tracking the min/max on the fly. */
for(i=nBoundary;i<grid->N[0]-nBoundary;i++)
{
for(j=nBoundary;j<grid->N[1]-nBoundary;j++)
{
for(k=nBoundary;k<grid->N[2]-nBoundary;k++)
{
v = cc*grid->point[i][j][k].C[3];
for(n=1;n<=nBoundary;n++)
{
v += fcx[n] *(grid->point[i-n][j][k].C[3]+grid->point[i+n][j][k].C[3]);
v += fcy[n] *(grid->point[i][j-n][k].C[3]+grid->point[i][j+n][k].C[3]);
v += fcz[n] *(grid->point[i][j][k-n].C[3]+grid->point[i][j][k+n].C[3]);
}
lapGrid->point[i][j][k].C[3] = v;
if(beg)
{
beg = FALSE;
lapGrid->limits.MinMax[0][3] = v;
lapGrid->limits.MinMax[1][3] = v;
}
else
{
if(lapGrid->limits.MinMax[0][3]>v)
lapGrid->limits.MinMax[0][3] = v;
if(lapGrid->limits.MinMax[1][3]<v)
lapGrid->limits.MinMax[1][3] = v;
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPLAPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPLAPGRID,FALSE);
}
if(CancelCalcul)
{
lapGrid = free_grid(lapGrid);
}
else
{
reset_boundary(lapGrid, nBoundary);
}
g_free(fcx);
g_free(fcy);
g_free(fcz);
return lapGrid;
}
/*************************************************************************************/
/*
 * Compute |grad f| of grid->C[3] with an antisymmetric central-difference
 * stencil of width nBoundary (coefficients from getCoefsGradient()).
 * Returns a new grid of the same geometry; interior nodes hold the gradient
 * norm, boundary nodes are filled by reset_boundary(). Returns NULL on bad
 * input or when the user cancels (global CancelCalcul).
 * Caller owns the returned grid.
 *
 * Fix: removed the unused local `cc` (the gradient stencil has no center
 * coefficient, unlike the Laplacian one).
 */
Grid* get_grid_norm_gradient(Grid* grid, gint nBoundary)
{
gint i;
gint j;
gint k;
gint kn;
Grid* gardGrid = NULL;
gdouble xh, yh, zh;
gdouble a, b, c;
gint N[3] = {0,0,0};
gdouble* fcx = NULL;
gdouble* fcy = NULL;
gdouble* fcz = NULL;
GridLimits limits;
gdouble scale = 0;
gint n;
gboolean beg = TRUE;
gdouble gx, gy, gz;
if(grid==NULL) return NULL;
if(nBoundary<1) return NULL;
/* The stencil needs nBoundary nodes on each side of every interior node. */
if(grid->N[0]<=2*nBoundary) return NULL;
if(grid->N[1]<=2*nBoundary) return NULL;
if(grid->N[2]<=2*nBoundary) return NULL;
for(n=0;n<3;n++) N[n] = grid->N[n];
/* Grid step along each direction = distance between adjacent nodes. */
i = 1; j = 0; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
xh = sqrt(a*a+b*b+c*c);
i = 0; j = 1; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
yh = sqrt(a*a+b*b+c*c);
i = 0; j = 0; k = 1;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
zh = sqrt(a*a+b*b+c*c);
fcx = g_malloc((nBoundary)*sizeof(gdouble));
fcy = g_malloc((nBoundary)*sizeof(gdouble));
fcz = g_malloc((nBoundary)*sizeof(gdouble));
getCoefsGradient(nBoundary, xh, yh, zh, fcx, fcy, fcz);
limits.MinMax[0][0] = grid->limits.MinMax[0][0];
limits.MinMax[1][0] = grid->limits.MinMax[1][0];
limits.MinMax[0][1] = grid->limits.MinMax[0][1];
limits.MinMax[1][1] = grid->limits.MinMax[1][1];
limits.MinMax[0][2] = grid->limits.MinMax[0][2];
limits.MinMax[1][2] = grid->limits.MinMax[1][2];
gardGrid = grid_point_alloc(N,limits);
progress_orb(0,GABEDIT_PROGORB_COMPGRADGRID,TRUE);
scale = (gdouble)1.01/gardGrid->N[0];
/* Copy coordinates; zero values so the boundary shell starts defined. */
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
gardGrid->point[i][j][k].C[0] = grid->point[i][j][k].C[0];
gardGrid->point[i][j][k].C[1] = grid->point[i][j][k].C[1];
gardGrid->point[i][j][k].C[2] = grid->point[i][j][k].C[2];
gardGrid->point[i][j][k].C[3] = 0;
}
}
}
/* Antisymmetric differences f(i+n)-f(i-n), n = -nBoundary..-1, weight fc*[kn]. */
for(i=nBoundary;i<grid->N[0]-nBoundary;i++)
{
for(j=nBoundary;j<grid->N[1]-nBoundary;j++)
{
for(k=nBoundary;k<grid->N[2]-nBoundary;k++)
{
gx = gy = gz = 0.0;
for(n=-nBoundary, kn=0 ; kn<nBoundary ; n++, kn++)
{
gx += fcx[kn] * (grid->point[i+n][j][k].C[3]-grid->point[i-n][j][k].C[3]);
gy += fcy[kn] * (grid->point[i][j+n][k].C[3]-grid->point[i][j-n][k].C[3]);
gz += fcz[kn] * (grid->point[i][j][k+n].C[3]-grid->point[i][j][k-n].C[3]) ;
}
gardGrid->point[i][j][k].C[3] = sqrt(gx*gx+gy*gy+gz*gz);
if(beg)
{
beg = FALSE;
gardGrid->limits.MinMax[0][3] = gardGrid->point[i][j][k].C[3];
gardGrid->limits.MinMax[1][3] = gardGrid->point[i][j][k].C[3];
}
else
{
if(gardGrid->limits.MinMax[0][3]>gardGrid->point[i][j][k].C[3])
gardGrid->limits.MinMax[0][3] = gardGrid->point[i][j][k].C[3];
if(gardGrid->limits.MinMax[1][3]<gardGrid->point[i][j][k].C[3])
gardGrid->limits.MinMax[1][3] = gardGrid->point[i][j][k].C[3];
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRADGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRADGRID,FALSE);
}
if(CancelCalcul)
{
gardGrid = free_grid(gardGrid);
}
else
{
reset_boundary(gardGrid, nBoundary);
}
g_free(fcx);
g_free(fcy);
g_free(fcz);
return gardGrid;
}
/*******************************************************************************************/
/*
 * Build a sign(lambda2)*rho grid (NCI-style analysis): each interior node
 * keeps the density value from `grid` but its sign is flipped when lambda2,
 * the middle eigenvalue of the density Hessian (computed by getLambda2()
 * from gradient and Laplacian stencil coefficients), is negative.
 * Boundary nodes are filled by reset_boundary(). Returns NULL on bad input
 * or user cancellation (global CancelCalcul). Caller owns the returned grid.
 *
 * Fix: removed six unused locals from the original (kn, cc, gx/gy/gz,
 * PRECISION, rho) — none were referenced anywhere in the body.
 */
Grid* get_grid_sign_lambda2_density(Grid* grid, gint nBoundary)
{
gint i;
gint j;
gint k;
Grid* sl2Grid = NULL;
gdouble xh, yh, zh;
gdouble a, b, c;
gint N[3] = {0,0,0};
gdouble* fcx = NULL;
gdouble* fcy = NULL;
gdouble* fcz = NULL;
GridLimits limits;
gdouble scale = 0;
gint n;
gboolean beg = TRUE;
gdouble lambda2;
gdouble* lfcx = NULL;
gdouble* lfcy = NULL;
gdouble* lfcz = NULL;
gdouble lcc;
if(grid==NULL) return NULL;
if(nBoundary<1) return NULL;
/* The stencils need nBoundary nodes on each side of every interior node. */
if(grid->N[0]<=2*nBoundary) return NULL;
if(grid->N[1]<=2*nBoundary) return NULL;
if(grid->N[2]<=2*nBoundary) return NULL;
for(n=0;n<3;n++) N[n] = grid->N[n];
/* Grid step along each direction = distance between adjacent nodes. */
i = 1; j = 0; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
xh = sqrt(a*a+b*b+c*c);
i = 0; j = 1; k = 0;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
yh = sqrt(a*a+b*b+c*c);
i = 0; j = 0; k = 1;
a = grid->point[i][j][k].C[0]-grid->point[0][0][0].C[0];
b = grid->point[i][j][k].C[1]-grid->point[0][0][0].C[1];
c = grid->point[i][j][k].C[2]-grid->point[0][0][0].C[2];
zh = sqrt(a*a+b*b+c*c);
/* Gradient stencil (nBoundary weights) and Laplacian stencil
 * (nBoundary weights + center coefficient lcc), both used by getLambda2(). */
fcx = g_malloc((nBoundary)*sizeof(gdouble));
fcy = g_malloc((nBoundary)*sizeof(gdouble));
fcz = g_malloc((nBoundary)*sizeof(gdouble));
getCoefsGradient(nBoundary, xh, yh, zh, fcx, fcy, fcz);
lfcx = g_malloc((nBoundary+1)*sizeof(gdouble));
lfcy = g_malloc((nBoundary+1)*sizeof(gdouble));
lfcz = g_malloc((nBoundary+1)*sizeof(gdouble));
getCoefsLaplacian(nBoundary, xh, yh, zh, lfcx, lfcy, lfcz, &lcc);
limits.MinMax[0][0] = grid->limits.MinMax[0][0];
limits.MinMax[1][0] = grid->limits.MinMax[1][0];
limits.MinMax[0][1] = grid->limits.MinMax[0][1];
limits.MinMax[1][1] = grid->limits.MinMax[1][1];
limits.MinMax[0][2] = grid->limits.MinMax[0][2];
limits.MinMax[1][2] = grid->limits.MinMax[1][2];
sl2Grid = grid_point_alloc(N,limits);
progress_orb(0,GABEDIT_PROGORB_COMPL2GRID,TRUE);
scale = (gdouble)1.01/sl2Grid->N[0];
/* Start from a full copy (coordinates AND values) of the input grid. */
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
sl2Grid->point[i][j][k].C[0] = grid->point[i][j][k].C[0];
sl2Grid->point[i][j][k].C[1] = grid->point[i][j][k].C[1];
sl2Grid->point[i][j][k].C[2] = grid->point[i][j][k].C[2];
sl2Grid->point[i][j][k].C[3] = grid->point[i][j][k].C[3];
}
}
}
/* Flip the sign of the density where lambda2 < 0; track min/max on the fly. */
for(i=nBoundary;i<grid->N[0]-nBoundary;i++)
{
for(j=nBoundary;j<grid->N[1]-nBoundary;j++)
{
for(k=nBoundary;k<grid->N[2]-nBoundary;k++)
{
lambda2 = getLambda2(grid,i, j, k, fcx, fcy, fcz, lfcx, lfcy, lfcz, nBoundary);
if(lambda2<0) sl2Grid->point[i][j][k].C[3] = -sl2Grid->point[i][j][k].C[3];
if(beg)
{
beg = FALSE;
sl2Grid->limits.MinMax[0][3] = sl2Grid->point[i][j][k].C[3];
sl2Grid->limits.MinMax[1][3] = sl2Grid->point[i][j][k].C[3];
}
else
{
if(sl2Grid->limits.MinMax[0][3]>sl2Grid->point[i][j][k].C[3])
sl2Grid->limits.MinMax[0][3] = sl2Grid->point[i][j][k].C[3];
if(sl2Grid->limits.MinMax[1][3]<sl2Grid->point[i][j][k].C[3])
sl2Grid->limits.MinMax[1][3] = sl2Grid->point[i][j][k].C[3];
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPL2GRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPL2GRID,FALSE);
}
if(CancelCalcul)
{
sl2Grid = free_grid(sl2Grid);
}
else
{
reset_boundary(sl2Grid, nBoundary);
}
g_free(fcx);
g_free(fcy);
g_free(fcz);
g_free(lfcx);
g_free(lfcy);
g_free(lfcz);
return sl2Grid;
}
/*********************************************************************************/
/*
 * Gradient component `id` (0=x,1=y,2=z) of Slater primitive n of contracted
 * orbital i at (x,y,z). The implementation is deliberately disabled (body
 * commented out) and the function currently always returns 0 — callers
 * (get_grad_value_CSTF) therefore get a zero gradient for Slater bases.
 * NOTE(review): presumably kept as a stub pending a working STF gradient;
 * confirm before re-enabling the commented code.
 */
static gdouble get_grad_value_STF(gdouble x,gdouble y,gdouble z,gint i,gint n,gint id)
{
/*
gdouble v = 0.0;
gdouble d = 0;
gdouble xi = x-SAOrb[i].Stf[n].C[0];
gdouble yi = y-SAOrb[i].Stf[n].C[1];
gdouble zi = z-SAOrb[i].Stf[n].C[2];
gint l[3] = {SAOrb[i].Stf[n].l[0],SAOrb[i].Stf[n].l[1],SAOrb[i].Stf[n].l[2]};
d = (xi*xi)+(yi*yi)+(zi*zi);
d *=SAOrb[i].Stf[n].Ex;
if(d>40) return 1e-14;
d = exp(-d);
l[id]++;
v = -2*SAOrb[i].Stf[n].Ex*SAOrb[i].Stf[n].Coef*
pow(xi,l[0])* pow(yi,l[1])* pow(zi,l[2])*d;
l[id]-=2;
if(l[id]>=0)
v+= (l[id]+1)*SAOrb[i].Stf[n].Coef*
pow(xi,l[0])* pow(yi,l[1])* pow(zi,l[2])*d;
return v;
*/
return 0;
}
/*********************************************************************************/
/*
 * Gradient component `id` of contracted Slater orbital i at (x,y,z):
 * sum of the primitive contributions. (Currently always 0, because
 * get_grad_value_STF is a stub.)
 */
static gdouble get_grad_value_CSTF(gdouble x,gdouble y,gdouble z,gint i, gint id)
{
gdouble sum = 0.0;
gint n;
for(n=0;n<SAOrb[i].N;n++) sum += get_grad_value_STF(x,y,z,i,n,id);
return sum;
}
/*********************************************************************************/
/*
 * Gradient component `id` (0=x,1=y,2=z) of Gaussian primitive n of
 * contracted orbital i at (x,y,z).
 * Uses d/dx [x^l e^(-a r^2)] = (l x^(l-1) - 2a x^(l+1)) e^(-a r^2),
 * realized by temporarily bumping l[id] by +1 then -2 around the pow() calls.
 * Returns a tiny constant when the exponential underflows (a*r^2 > 40).
 */
static gdouble get_grad_value_GTF(gdouble x,gdouble y,gdouble z,gint i,gint n,gint id)
{
gdouble dx = x-AOrb[i].Gtf[n].C[0];
gdouble dy = y-AOrb[i].Gtf[n].C[1];
gdouble dz = z-AOrb[i].Gtf[n].C[2];
gint l[3] = {AOrb[i].Gtf[n].l[0],AOrb[i].Gtf[n].l[1],AOrb[i].Gtf[n].l[2]};
gdouble e;
gdouble val;
gdouble ar2 = (dx*dx)+(dy*dy)+(dz*dz);
ar2 *= AOrb[i].Gtf[n].Ex;
if(ar2>40) return 1e-14;
e = exp(-ar2);
/* -2a * x^(l+1) term */
l[id]++;
val = -2*AOrb[i].Gtf[n].Ex*AOrb[i].Gtf[n].Coef*
pow(dx,l[0])* pow(dy,l[1])* pow(dz,l[2])*e;
/* + l * x^(l-1) term (only when the original exponent was >= 1) */
l[id]-=2;
if(l[id]>=0)
val += (l[id]+1)*AOrb[i].Gtf[n].Coef*
pow(dx,l[0])* pow(dy,l[1])* pow(dz,l[2])*e;
return val;
}
/*********************************************************************************/
/*
 * Gradient component `id` of contracted Gaussian orbital i at (x,y,z):
 * sum of the primitive contributions.
 */
static gdouble get_grad_value_CGTF(gdouble x,gdouble y,gdouble z,gint i, gint id)
{
gdouble sum = 0.0;
gint n;
for(n=0;n<AOrb[i].numberOfFunctions;n++) sum += get_grad_value_GTF(x,y,z,i,n,id);
return sum;
}
/*********************************************************************************/
/*
 * Basis-type dispatcher for the gradient of contracted orbital i:
 * Gaussian basis (AOrb) takes priority, then Slater basis (SAOrb),
 * otherwise 0 when no basis is loaded.
 */
static gdouble get_grad_value_CBTF(gdouble x,gdouble y,gdouble z,gint i, gint id)
{
if(AOrb != NULL) return get_grad_value_CGTF(x, y, z, i, id);
if(SAOrb != NULL) return get_grad_value_CSTF(x, y, z, i, id);
return 0;
}
/*********************************************************************************/
/*
static gdouble get_grad_value_orbital(gdouble x,gdouble y,gdouble z,gint k, gint id)
{
gdouble v=0.0;
gint i;
if(TypeSelOrb == 1)
for(i=0;i<NAOrb;i++)
{
if(fabs(CoefAlphaOrbitals[k][i])>1e-10)
v+=CoefAlphaOrbitals[k][i]*get_grad_value_CBTF(x,y,z,i,id);
}
else
for(i=0;i<NAOrb;i++)
{
if(fabs(CoefBetaOrbitals[k][i])>1e-10)
v+=CoefBetaOrbitals[k][i]*get_grad_value_CBTF(x,y,z,i,id);
}
return v;
}
*/
/*********************************************************************************/
/*
static gdouble get_norm2_grad_value_orbital(gdouble x,gdouble y,gdouble z,gint k)
{
gdouble vx = get_grad_value_orbital( x, y, z, k, 0);
gdouble vy = get_grad_value_orbital( x, y, z, k, 1);
gdouble vz = get_grad_value_orbital( x, y, z, k, 2);
return vx*vx+vy*vy+vz*vz;
}
*/
/*********************************************************************************/
/*
static gdouble get_norm_grad_value_orbital(gdouble x,gdouble y,gdouble z,gint k)
{
return sqrt(get_norm2_grad_value_orbital(x,y,z,k));
}
*/
/*********************************************************************************/
/*
static gdouble get_grad_value_electronic_density(gdouble x,gdouble y,gdouble z,gint id)
{
gdouble v1 = 0.0;
gdouble v2 = 0.0;
gdouble cgv = 0.0;
gdouble dcgv = 0.0;
gint i;
gint k1;
gint k2;
gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
for(k1=0;k1<NAlphaOrb;k1++)
{
PhiAlpha[k1] = 0.0;
dPhiAlpha[k1] = 0.0;
}
for(k2=0;k2<NBetaOrb;k2++)
{
PhiBeta[k2] = 0.0;
dPhiBeta[k2] = 0.0;
}
for(i=0;i<NAOrb;i++)
{
cgv = get_value_CBTF(x,y,z,i);
dcgv = get_grad_value_CBTF(x,y,z,i,id);
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
dPhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*dcgv;
}
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
{
PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
dPhiBeta[k2] += CoefBetaOrbitals[k2][i]*dcgv;
}
}
v1 = 0.0;
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
v1 += OccAlphaOrbitals[k1]*PhiAlpha[k1]*dPhiAlpha[k1];
v2 = 0.0;
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
v2 += OccBetaOrbitals[k2]*PhiBeta[k2]*dPhiBeta[k2];
g_free(PhiAlpha);
g_free(dPhiAlpha);
g_free(PhiBeta);
g_free(dPhiBeta);
return 2*(v1+v2);
}
*/
/*********************************************************************************/
/*
static gdouble get_norm2_grad_value_electronic_density(gdouble x,gdouble y,gdouble z)
{
gdouble vx = get_grad_value_electronic_density( x, y, z, 0);
gdouble vy = get_grad_value_electronic_density( x, y, z, 1);
gdouble vz = get_grad_value_electronic_density( x, y, z, 2);
return vx*vx+vy*vy+vz*vz;
}
*/
/*********************************************************************************/
/*
f (x,y,z) =
(2 - n)/2
{
[sum_j(1 to N) O_j Phi_j (x,y,z)^2 e^(-alpha(e_HOMO -e_j ))]/
[sum_j(1 to N) O_j e^(-alpha(e_HOMO -e_j ))]
}
+
n/2
{
[sum_j(1 to N) (2-O_j) Phi_j (x,y,z)^2 e^(+alpha(e_LUMO -e_j ))]/
[sum_j(1 to N) (2-O_j) e^(+alpha(e_LUMO -e_j ))]
}
n = 0 for an electrophilic reaction,
1 for a radical reaction, and
2 for a nucleophilic reaction.
N is the number of orbitals.
O_j is the number of electrons in orbital j.
Phi_j(x,y,z) is the value of the orbital j at point (x,y,z).
e_j is the energy of orbital j.
*/
/*
 * Frontier-electron-density value at (x,y,z) per the formula in the comment
 * block above: a Boltzmann-like weighting (parameter alpha) of occupied
 * orbital densities relative to the HOMO (first fraction) and of virtual
 * orbital densities relative to the LUMO (second fraction), mixed by
 * n = 0 (electrophilic), 1 (radical), 2 (nucleophilic).
 * Alpha and beta spin orbitals are handled separately, so per-spin
 * occupations run 0..1 and the virtual weight is (1 - occ).
 */
static gdouble get_value_fed(gdouble x,gdouble y,gdouble z,gdouble alpha, gint n, gdouble eHOMO, gdouble eLUMO)
{
gdouble s1_1 = 0.0;
gdouble s1_2 = 0.0;
gdouble s2_1 = 0.0;
gdouble s2_2 = 0.0;
gdouble de = 0;
gdouble d = 0;
gdouble cgv;
gint i;
gint k1;
gint k2;
gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
for(k1=0;k1<NAlphaOrb;k1++) PhiAlpha[k1] = 0.0;
for(k2=0;k2<NBetaOrb;k2++) PhiBeta[k2] = 0.0;
/* Build every MO value at this point from the basis-function values. */
for(i=0;i<NAOrb;i++)
{
cgv = get_value_CBTF(x,y,z,i);
for(k1=0;k1<NAlphaOrb;k1++) PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
for(k2=0;k2<NBetaOrb;k2++) PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
}
/* Square in place: Phi arrays now hold orbital densities Phi^2. */
for(k1=0;k1<NAlphaOrb;k1++) PhiAlpha[k1] = PhiAlpha[k1] * PhiAlpha[k1] ;
for(k2=0;k2<NBetaOrb;k2++) PhiBeta[k2] = PhiBeta[k2] * PhiBeta[k2] ;
/* Occupied-orbital sums, weighted by exp(-alpha*(eHOMO-e_j)); skipped for
 * pure nucleophilic (n==2) since its prefactor (2-n)/2 vanishes. */
if(n!=2)
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
de = exp(-alpha*(eHOMO-EnerAlphaOrbitals[k1]));
d = OccAlphaOrbitals[k1]*de;
s1_1 += d*PhiAlpha[k1];
s1_2 += d;
}
if(n!=2)
for(k1=0;k1<NBetaOrb;k1++)
if(OccBetaOrbitals[k1]>1e-8)
{
de = exp(-alpha*(eHOMO-EnerBetaOrbitals[k1]));
d = OccBetaOrbitals[k1]* de;
s1_1 += d*PhiBeta[k1];
s1_2 += d;
}
/* Virtual-orbital sums, weighted by exp(+alpha*(eLUMO-e_j)); skipped for
 * pure electrophilic (n==0) since its prefactor n/2 vanishes. */
if(n!=0)
for(k1=0;k1<NAlphaOrb;k1++)
if(fabs(1-OccAlphaOrbitals[k1])>1e-8)
{
de = exp(alpha*(eLUMO-EnerAlphaOrbitals[k1]));
d = (1-OccAlphaOrbitals[k1])*de;
s2_1 += d*PhiAlpha[k1];
s2_2 += d;
}
if(n!=0)
for(k1=0;k1<NBetaOrb;k1++)
if(fabs(1-OccBetaOrbitals[k1])>1e-8)
{
de = exp(alpha*(eLUMO-EnerBetaOrbitals[k1]));
d = (1-OccBetaOrbitals[k1])*de;
s2_1 += d*PhiBeta[k1];
s2_2 += d;
}
g_free(PhiAlpha);
g_free(PhiBeta);
/* Guard against division by an (effectively) zero partition sum. */
if(s1_2<1e-12) s1_2 = 1;
if(s2_2<1e-12) s2_2 = 1;
return (2.-n)/2.*s1_1/s1_2 + n/2.*s2_1/s2_2;
}
/****************************************************************/
/*
 * Highest energy among occupied orbitals (occupation > 1e-8), scanning
 * both spins. Returns 1e10 when no alpha orbitals are loaded.
 * Note: the scan is seeded with EnerAlphaOrbitals[0] regardless of its
 * occupation, matching the original behavior.
 */
static gdouble get_energy_homo()
{
gint k;
gdouble best;
if(NAlphaOrb<1) return 1e10;
best = EnerAlphaOrbitals[0];
for(k=1;k<NAlphaOrb;k++)
if(OccAlphaOrbitals[k]>1e-8 && EnerAlphaOrbitals[k]>best) best = EnerAlphaOrbitals[k];
for(k=0;k<NBetaOrb;k++)
if(OccBetaOrbitals[k]>1e-8 && EnerBetaOrbitals[k]>best) best = EnerBetaOrbitals[k];
return best;
}
/****************************************************************/
/*
 * Lowest energy among virtual orbitals (occupation < 1e-8), scanning both
 * spins; the `first` flag makes the first virtual found seed the minimum.
 * Returns 1e10 when no alpha orbitals are loaded; falls back to
 * EnerAlphaOrbitals[0] when no virtual orbital exists at all.
 */
static gdouble get_energy_lumo()
{
gint k;
gboolean first = TRUE;
gdouble best;
if(NAlphaOrb<1) return 1e10;
best = EnerAlphaOrbitals[0];
for(k=0;k<NAlphaOrb;k++)
if(OccAlphaOrbitals[k]<1e-8)
if(first || EnerAlphaOrbitals[k]<best)
{
best = EnerAlphaOrbitals[k];
first = FALSE;
}
for(k=0;k<NBetaOrb;k++)
if(OccBetaOrbitals[k]<1e-8)
if(first || EnerBetaOrbitals[k]<best)
{
best = EnerBetaOrbitals[k];
first = FALSE;
}
return best;
}
/*********************************************************************************/
/*
 * ELF (Becke-Edgecombe) value at (x,y,z): 1/(1 + chi^2) where
 * chi = D/Dh, D = sum|grad phi|^2 - |grad rho|^2/(4 rho) and
 * Dh = (3/5)(6 pi^2)^(2/3) rho^(5/3) (uniform-gas reference).
 * The unused `dump` parameter only satisfies the Func3d signature.
 * NOTE(review): divides by rho with no guard — at points where the density
 * is numerically zero this yields inf/NaN; presumably acceptable to the
 * isosurface code, but confirm.
 */
static gdouble get_value_elf_becke(gdouble x,gdouble y,gdouble z,gint dump)
{
gdouble grho2 = 0;
gdouble rho = 0;
gdouble sphi = 0;
gdouble D = 0;
gdouble Dh = 0;
gdouble co = 3.0/5.0*pow(6*PI*PI,2.0/3);
gdouble XBE2 = 0;
gdouble v1X = 0.0;
gdouble v1Y = 0.0;
gdouble v1Z = 0.0;
gdouble v2X = 0.0;
gdouble v2Y = 0.0;
gdouble v2Z = 0.0;
gdouble vX = 0.0;
gdouble vY = 0.0;
gdouble vZ = 0.0;
gdouble cgv = 0.0;
gdouble dcgvX = 0.0;
gdouble dcgvY = 0.0;
gdouble dcgvZ = 0.0;
gint i;
gint k1;
gint k2;
gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaX = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaY = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaZ = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaX = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaY = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaZ = g_malloc(NBetaOrb*sizeof(gdouble));
for(k1=0;k1<NAlphaOrb;k1++)
{
PhiAlpha[k1] = 0.0;
dPhiAlphaX[k1] = 0.0;
dPhiAlphaY[k1] = 0.0;
dPhiAlphaZ[k1] = 0.0;
}
for(k2=0;k2<NBetaOrb;k2++)
{
PhiBeta[k2] = 0.0;
dPhiBetaX[k2] = 0.0;
dPhiBetaY[k2] = 0.0;
dPhiBetaZ[k2] = 0.0;
}
/* Accumulate each occupied MO's value and gradient at this point
 * from the basis-function values/gradients. */
for(i=0;i<NAOrb;i++)
{
cgv = get_value_CBTF(x,y,z,i);
dcgvX = get_grad_value_CBTF(x,y,z,i,0);
dcgvY = get_grad_value_CBTF(x,y,z,i,1);
dcgvZ = get_grad_value_CBTF(x,y,z,i,2);
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
dPhiAlphaX[k1] += CoefAlphaOrbitals[k1][i]*dcgvX;
dPhiAlphaY[k1] += CoefAlphaOrbitals[k1][i]*dcgvY;
dPhiAlphaZ[k1] += CoefAlphaOrbitals[k1][i]*dcgvZ;
}
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
{
PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
dPhiBetaX[k2] += CoefBetaOrbitals[k2][i]*dcgvX;
dPhiBetaY[k2] += CoefBetaOrbitals[k2][i]*dcgvY;
dPhiBetaZ[k2] += CoefBetaOrbitals[k2][i]*dcgvZ;
}
}
/* rho, half the density gradient (v1+v2), and the kinetic-like term sphi. */
v1X = 0.0;
v1Y = 0.0;
v1Z = 0.0;
rho = 0;
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
v1X += OccAlphaOrbitals[k1]*PhiAlpha[k1]*dPhiAlphaX[k1];
v1Y += OccAlphaOrbitals[k1]*PhiAlpha[k1]*dPhiAlphaY[k1];
v1Z += OccAlphaOrbitals[k1]*PhiAlpha[k1]*dPhiAlphaZ[k1];
rho += OccAlphaOrbitals[k1]*PhiAlpha[k1]*PhiAlpha[k1];
sphi += OccAlphaOrbitals[k1]*(
dPhiAlphaX[k1]*dPhiAlphaX[k1]
+ dPhiAlphaY[k1]*dPhiAlphaY[k1]
+ dPhiAlphaZ[k1]*dPhiAlphaZ[k1]
);
}
v2X = 0.0;
v2Y = 0.0;
v2Z = 0.0;
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
{
v2X += OccBetaOrbitals[k2]*PhiBeta[k2]*dPhiBetaX[k2];
v2Y += OccBetaOrbitals[k2]*PhiBeta[k2]*dPhiBetaY[k2];
v2Z += OccBetaOrbitals[k2]*PhiBeta[k2]*dPhiBetaZ[k2];
rho += OccBetaOrbitals[k2]*PhiBeta[k2]*PhiBeta[k2];
sphi +=OccBetaOrbitals[k2]*(
dPhiBetaX[k2]*dPhiBetaX[k2]
+ dPhiBetaY[k2]*dPhiBetaY[k2]
+ dPhiBetaZ[k2]*dPhiBetaZ[k2]
);
}
/* Full density gradient = 2 * sum occ * phi * grad(phi). */
vX = (v1X+v2X)*2;
vY = (v1Y+v2Y)*2;
vZ = (v1Z+v2Z)*2;
grho2 = vX*vX + vY*vY +vZ*vZ ;
g_free(PhiAlpha);
g_free(dPhiAlphaX);
g_free(dPhiAlphaY);
g_free(dPhiAlphaZ);
g_free(PhiBeta);
g_free(dPhiBetaX);
g_free(dPhiBetaY);
g_free(dPhiBetaZ);
D = sphi - grho2/4.0/rho;
Dh = co*pow(rho,5.0/3.0);
XBE2 = D/Dh;
XBE2 = XBE2*XBE2;
return 1.0/(1.0+XBE2);
}
/*********************************************************************************/
/*
 * Savin-style Electron Localization Function (ELF) value at point (x,y,z).
 * Builds occupied alpha/beta MO values and gradients from the contracted
 * basis functions (CBTF), then evaluates ELF = 1/(1+chi^2) with
 * chi = (tau - |grad rho|^2/(8 rho) + epsilon)/tau_h, where tau_h is the
 * homogeneous-electron-gas kinetic energy density.
 * The 'dump' parameter is unused here (kept for the common grid-callback
 * signature used by define_grid_point).
 * Relies on the file globals: NAOrb, NAlphaOrb, NBetaOrb, Occ*Orbitals,
 * Coef*Orbitals, get_value_CBTF, get_grad_value_CBTF.
 * NOTE(review): no protection against rho == 0 in t = sphi/2 - grho2/8.0/rho;
 * presumably epsilon and the occupation cutoff keep this benign — confirm.
 */
static gdouble get_value_elf_savin(gdouble x,gdouble y,gdouble z,gint dump)
{
gdouble grho2 = 0;
gdouble rho = 0;
gdouble sphi = 0;
gdouble t = 0;
gdouble th = 0;
/* Thomas-Fermi constant cF = (3/10)(3 pi^2)^(2/3) */
gdouble cf = 3.0/10.0*pow(3*PI*PI,2.0/3);
gdouble XS2 = 0;
gdouble epsilon = 2.87e-5; /* This value constrains ELF to be less than 0.5 for rho<1e-3*/
/* see Can. J. Chem. Vol. 74,1996 page 1088 */
gdouble v1X = 0.0;
gdouble v1Y = 0.0;
gdouble v1Z = 0.0;
gdouble v2X = 0.0;
gdouble v2Y = 0.0;
gdouble v2Z = 0.0;
gdouble vX = 0.0;
gdouble vY = 0.0;
gdouble vZ = 0.0;
gdouble cgv = 0.0;
gdouble dcgvX = 0.0;
gdouble dcgvY = 0.0;
gdouble dcgvZ = 0.0;
gint i;
gint k1;
gint k2;
/* per-orbital value and Cartesian gradient accumulators */
gdouble *PhiAlpha = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaX = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaY = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *dPhiAlphaZ = g_malloc(NAlphaOrb*sizeof(gdouble));
gdouble *PhiBeta = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaX = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaY = g_malloc(NBetaOrb*sizeof(gdouble));
gdouble *dPhiBetaZ = g_malloc(NBetaOrb*sizeof(gdouble));
for(k1=0;k1<NAlphaOrb;k1++)
{
PhiAlpha[k1] = 0.0;
dPhiAlphaX[k1] = 0.0;
dPhiAlphaY[k1] = 0.0;
dPhiAlphaZ[k1] = 0.0;
}
for(k2=0;k2<NBetaOrb;k2++)
{
PhiBeta[k2] = 0.0;
dPhiBetaX[k2] = 0.0;
dPhiBetaY[k2] = 0.0;
dPhiBetaZ[k2] = 0.0;
}
/* contract AO values/gradients into MO values/gradients;
 * orbitals with negligible occupation (<= 1e-8) are skipped */
for(i=0;i<NAOrb;i++)
{
cgv = get_value_CBTF(x,y,z,i);
dcgvX = get_grad_value_CBTF(x,y,z,i,0);
dcgvY = get_grad_value_CBTF(x,y,z,i,1);
dcgvZ = get_grad_value_CBTF(x,y,z,i,2);
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
PhiAlpha[k1] += CoefAlphaOrbitals[k1][i]*cgv;
dPhiAlphaX[k1] += CoefAlphaOrbitals[k1][i]*dcgvX;
dPhiAlphaY[k1] += CoefAlphaOrbitals[k1][i]*dcgvY;
dPhiAlphaZ[k1] += CoefAlphaOrbitals[k1][i]*dcgvZ;
}
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
{
PhiBeta[k2] += CoefBetaOrbitals[k2][i]*cgv;
dPhiBetaX[k2] += CoefBetaOrbitals[k2][i]*dcgvX;
dPhiBetaY[k2] += CoefBetaOrbitals[k2][i]*dcgvY;
dPhiBetaZ[k2] += CoefBetaOrbitals[k2][i]*dcgvZ;
}
}
/* alpha contributions: rho, (1/2) grad rho, and sum |grad phi|^2 (sphi) */
v1X = 0.0;
v1Y = 0.0;
v1Z = 0.0;
rho = 0;
for(k1=0;k1<NAlphaOrb;k1++)
if(OccAlphaOrbitals[k1]>1e-8)
{
v1X += OccAlphaOrbitals[k1]*(PhiAlpha[k1]*dPhiAlphaX[k1]);
v1Y += OccAlphaOrbitals[k1]*(PhiAlpha[k1]*dPhiAlphaY[k1]);
v1Z += OccAlphaOrbitals[k1]*(PhiAlpha[k1]*dPhiAlphaZ[k1]);
rho += OccAlphaOrbitals[k1]*PhiAlpha[k1]*PhiAlpha[k1];
sphi += OccAlphaOrbitals[k1]*(
dPhiAlphaX[k1]*dPhiAlphaX[k1]
+ dPhiAlphaY[k1]*dPhiAlphaY[k1]
+ dPhiAlphaZ[k1]*dPhiAlphaZ[k1]
);
}
/* beta contributions */
v2X = 0.0;
v2Y = 0.0;
v2Z = 0.0;
for(k2=0;k2<NBetaOrb;k2++)
if(OccBetaOrbitals[k2]>1e-8)
{
v2X += OccBetaOrbitals[k2]*(PhiBeta[k2]*dPhiBetaX[k2]);
v2Y += OccBetaOrbitals[k2]*(PhiBeta[k2]*dPhiBetaY[k2]);
v2Z += OccBetaOrbitals[k2]*(PhiBeta[k2]*dPhiBetaZ[k2]);
rho += OccBetaOrbitals[k2]*PhiBeta[k2]*PhiBeta[k2];
sphi +=OccBetaOrbitals[k2]*(
dPhiBetaX[k2]*dPhiBetaX[k2]
+ dPhiBetaY[k2]*dPhiBetaY[k2]
+ dPhiBetaZ[k2]*dPhiBetaZ[k2]
);
}
/* grad rho = 2 * sum_k occ_k phi_k grad phi_k */
vX = (v1X+v2X)*2;
vY = (v1Y+v2Y)*2;
vZ = (v1Z+v2Z)*2;
grho2 = vX*vX + vY*vY +vZ*vZ ;
g_free(PhiAlpha);
g_free(dPhiAlphaX);
g_free(dPhiAlphaY);
g_free(dPhiAlphaZ);
g_free(PhiBeta);
g_free(dPhiBetaX);
g_free(dPhiBetaY);
g_free(dPhiBetaZ);
/* Pauli kinetic energy density and Savin's chi^2 ratio */
t = sphi/2 - grho2/8.0/rho;
th = cf*pow(rho,5.0/3.0);
XS2 = (t+epsilon)/th;
XS2 = XS2*XS2;
return 1.0/(1.0+XS2);
}
/*********************************************************************************/
/*
 * Solvent-accessible-surface scalar field at (x,y,z):
 * a Lennard-Jones-like sum over atoms, v = sum_i (s^12/r^12 - s^6/r^6)
 * with s = vdW radius + probe radius, clamped at 1e10.
 * The 'dump' parameter is unused (common grid-callback signature).
 */
static gdouble get_value_sas(gdouble x,gdouble y,gdouble z,gint dump)
{
	gdouble probe = solventRadius*ANG_TO_BOHR; /* probe radius in bohr */
	gdouble value = 0;
	gdouble PRECISION = 1e-10;                 /* keeps r^2 strictly positive */
	gint n;

	for(n=0;n<Ncenters;n++)
	{
		gdouble dx = x-GeomOrb[n].C[0];
		gdouble dy = y-GeomOrb[n].C[1];
		gdouble dz = z-GeomOrb[n].C[2];
		gdouble r2 = dx*dx+dy*dy+dz*dz;
		gdouble r6;
		gdouble sigma = GeomOrb[n].Prop.vanDerWaalsRadii + probe;
		gdouble sigma2 = sigma*sigma;
		gdouble sigma6 = sigma2*sigma2*sigma2;
		gdouble u;

		r2 += PRECISION;
		r6 = r2*r2*r2;
		u = sigma6/r6;
		value += u*u - u;
	}
	/* clamp to avoid absurd values at nuclear positions */
	if(value>1e10) value = 1e10;
	return value;
}
/*********************************************************************************/
/*
 * Compute multipole moments Q[l][l+m] (0 <= l <= lmax, -l <= m <= l) of the
 * density stored in grid->...C[3], about the point (xOff, yOff, zOff), by a
 * simple Riemann sum over the grid with real solid harmonics Zlm.
 * Returns a newly allocated (lmax+1) x (2l+1) jagged array; the caller frees
 * each row and the array. Returns NULL if the user cancelled (CancelCalcul),
 * after freeing all partial results.
 * NOTE(review): dv is forced negative (-fabs) — presumably because the grid
 * holds electron density and the electronic charge is negative; confirm.
 */
gdouble** compute_multipol_from_grid(Grid* grid, gint lmax, gdouble xOff, gdouble yOff, gdouble zOff)
{
gint i;
gint j;
gint k;
gint l;
gint m;
gdouble x;
gdouble y;
gdouble z;
gdouble r;
gdouble temp;
gdouble p;
gdouble** Q = g_malloc((lmax+1)*sizeof(gdouble*));
Zlm** slm = g_malloc((lmax+1)*sizeof(Zlm*));
gdouble PRECISION = 1e-13;
gdouble dv = 0;
gdouble scale;
/* allocate and zero the moments; precompute the Zlm coefficients */
for(l=0;l<=lmax;l++)
{
Q[l] = g_malloc((2*l+1)*sizeof(gdouble));
slm[l] = g_malloc((2*l+1)*sizeof(Zlm));
for(m=-l;m<=l;m++)
{
Q[l][l+m] = 0.0;
slm[l][l+m]=getZlm(l,m);
}
}
/* volume element from the grid spacing (assumes a uniform rectangular grid) */
dv = (grid->point[1][0][0].C[0]-grid->point[0][0][0].C[0])*
(grid->point[0][1][0].C[1]-grid->point[0][0][0].C[1])*
(grid->point[0][0][1].C[2]-grid->point[0][0][0].C[2]);
dv = -fabs(dv);
progress_orb(0,GABEDIT_PROGORB_COMPMULTIPOL,TRUE);
/* per-outer-slab progress increment (slightly >1 so the bar completes) */
scale = (gdouble)1.01/grid->N[0];
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
x = grid->point[i][j][k].C[0]-xOff;
y = grid->point[i][j][k].C[1]-yOff;
z = grid->point[i][j][k].C[2]-zOff;
r = sqrt(x*x + y*y + z*z+PRECISION);
temp = grid->point[i][j][k].C[3]*dv;
/* normalize direction for the Zlm evaluation */
x /= r;
y /= r;
z /= r;
for(l=0; l<=lmax; l++)
{
p = temp*pow(r,l);
for(m=-l; m<=l; m++)
{
Q[l][m+l] += p*getValueZlm(&slm[l][m+l],x,y,z);
}
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPMULTIPOL,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPMULTIPOL,FALSE);
}
for(l=0;l<=lmax;l++)
if(slm[l])g_free(slm[l]);
if(slm) g_free(slm);
/* on cancellation, release the moments and signal failure with NULL */
if(CancelCalcul)
{
if(Q)
{
for(l=0;l<=lmax;l++)
if(Q[l])g_free(Q[l]);
g_free(Q);
}
Q = NULL;
}
return Q;
}
/**************************************************************/
/**************************************************************/
/*
 * Fill grid->point[i][j][k].C[0..2] with the Cartesian coordinates of a
 * regular grid spanning the limits box along firstDirection/secondDirection/
 * thirdDirection. Only the coordinates are set; C[3] is left untouched.
 * Fix: removed the dead store firstPoint[i] = V0[i]+V1[i]+V2[i], which was
 * immediately overwritten (it only served the commented-out origin variant).
 * NOTE(review): the origin uses the file-global 'limits', while the spans use
 * grid->limits — confirm both are intended to agree.
 * Assumes grid->N[.] >= 2 (divides by N-1).
 */
static void define_xyz_grid(Grid*grid)
{
gint i;
gint j;
gint k;
gdouble x;
gdouble y;
gdouble z;
gdouble V0[3];
gdouble V1[3];
gdouble V2[3];
gdouble firstPoint[3];
/* box span vectors along the three grid directions */
for(i=0;i<3;i++)
{
V0[i] = firstDirection[i] *(grid->limits.MinMax[1][0]-grid->limits.MinMax[0][0]);
V1[i] = secondDirection[i]*(grid->limits.MinMax[1][1]-grid->limits.MinMax[0][1]);
V2[i] = thirdDirection[i] *(grid->limits.MinMax[1][2]-grid->limits.MinMax[0][2]);
}
for(i=0;i<3;i++)
{
/* alternative (disabled): firstPoint[i] = originOfCube[i] - (V0[i]+V1[i]+V2[i])/2; */
firstPoint[i] = limits.MinMax[0][i];
}
/* convert spans into per-step increments */
for(i=0;i<3;i++)
{
V0[i] /= grid->N[0]-1;
V1[i] /= grid->N[1]-1;
V2[i] /= grid->N[2]-1;
}
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
x = firstPoint[0] + i*V0[0] + j*V1[0] + k*V2[0];
y = firstPoint[1] + i*V0[1] + j*V1[1] + k*V2[1];
z = firstPoint[2] + i*V0[2] + j*V1[2] + k*V2[2];
grid->point[i][j][k].C[0] = x;
grid->point[i][j][k].C[1] = y;
grid->point[i][j][k].C[2] = z;
}
}
}
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * Molecular electrostatic potential from atomic partial charges, evaluated on
 * the geometry of an existing cube grid: V(r) = sum_n q_n/|r - R_n| over the
 * Ncenters atoms in GeomOrb. Returns a newly allocated grid (caller frees via
 * free_grid) whose C[3] holds V and whose MinMax[.][3] limits are updated, or
 * NULL if grid is NULL or the user cancelled.
 * Fix: removed a read of the uninitialized esp->...C[3] into an unused 'temp'
 * and a dead normalization of x,y,z (both were overwritten before use).
 */
Grid* compute_mep_grid_using_partial_charges_cube_grid(Grid* grid)
{
gint i;
gint j;
gint k;
gdouble x;
gdouble y;
gdouble z;
gdouble r;
gdouble PRECISION = 1e-13; /* keeps r > 0 at nuclear positions */
Grid* esp = NULL;
gdouble invR = 1.0;
gdouble v;
gint n;
gboolean beg = TRUE;
gdouble scale;
if(!grid) return NULL;
esp = grid_point_alloc(grid->N,grid->limits);
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
scale = (gdouble)1.01/grid->N[0];
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
/* copy the point coordinates from the source grid */
esp->point[i][j][k].C[0] = grid->point[i][j][k].C[0];
esp->point[i][j][k].C[1] = grid->point[i][j][k].C[1];
esp->point[i][j][k].C[2] = grid->point[i][j][k].C[2];
/* Coulomb sum over the partial charges */
v = 0;
for(n=0;n<Ncenters;n++)
{
x = esp->point[i][j][k].C[0]-GeomOrb[n].C[0];
y = esp->point[i][j][k].C[1]-GeomOrb[n].C[1];
z = esp->point[i][j][k].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].partialCharge;
}
esp->point[i][j][k].C[3]=v;
/* track min/max of the potential for the grid limits */
if(beg)
{
beg = FALSE;
esp->limits.MinMax[0][3] = v;
esp->limits.MinMax[1][3] = v;
}
else
{
if(esp->limits.MinMax[0][3]>v) esp->limits.MinMax[0][3] = v;
if(esp->limits.MinMax[1][3]<v) esp->limits.MinMax[1][3] = v;
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPMEPGRID,FALSE);
}
if(CancelCalcul)
{
esp = free_grid(esp);
}
return esp;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * Molecular electrostatic potential from atomic partial charges on a freshly
 * built regular grid (N points per axis, within 'limits'):
 * V(r) = sum_n q_n/|r - R_n|. Returns a newly allocated grid (caller frees),
 * or NULL on user cancellation.
 * Fix: removed a read of the uninitialized esp->...C[3] into an unused 'temp'
 * and a dead normalization of x,y,z (both overwritten in the charge loop).
 */
Grid* compute_mep_grid_using_partial_charges(gint N[], GridLimits limits)
{
gint i;
gint j;
gint k;
gdouble x;
gdouble y;
gdouble z;
gdouble r;
gdouble PRECISION = 1e-13; /* keeps r > 0 at nuclear positions */
Grid* esp = NULL;
gdouble invR = 1.0;
gdouble v;
gint n;
gboolean beg = TRUE;
gdouble scale;
esp = grid_point_alloc(N,limits);
define_xyz_grid(esp); /* fills C[0..2] of every point */
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
scale = (gdouble)1.01/N[0];
for(i=0;i<N[0];i++)
{
for(j=0;j<N[1];j++)
{
for(k=0;k<N[2];k++)
{
/* Coulomb sum over the partial charges */
v = 0;
for(n=0;n<Ncenters;n++)
{
x = esp->point[i][j][k].C[0]-GeomOrb[n].C[0];
y = esp->point[i][j][k].C[1]-GeomOrb[n].C[1];
z = esp->point[i][j][k].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].partialCharge;
}
esp->point[i][j][k].C[3]=v;
/* track min/max of the potential for the grid limits */
if(beg)
{
beg = FALSE;
esp->limits.MinMax[0][3] = v;
esp->limits.MinMax[1][3] = v;
}
else
{
if(esp->limits.MinMax[0][3]>v) esp->limits.MinMax[0][3] = v;
if(esp->limits.MinMax[1][3]<v) esp->limits.MinMax[1][3] = v;
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPMEPGRID,FALSE);
}
if(CancelCalcul)
{
esp = free_grid(esp);
}
return esp;
}
/*********************************************************/
/*********************************************************/
/*
 * Density-weighted centroid of the grid: (sum w*r)/(sum w) with w = C[3].
 * Falls back to the origin when the total weight is exactly zero.
 */
static void getCOff(Grid* grid, gdouble* pxOff, gdouble* pyOff, gdouble* pzOff)
{
	gint i, j, k;
	gdouble sumX = 0, sumY = 0, sumZ = 0;
	gdouble total = 0;

	for(i=0;i<grid->N[0];i++)
	for(j=0;j<grid->N[1];j++)
	for(k=0;k<grid->N[2];k++)
	{
		gdouble w = grid->point[i][j][k].C[3];
		total += w;
		sumX += w*grid->point[i][j][k].C[0];
		sumY += w*grid->point[i][j][k].C[1];
		sumZ += w*grid->point[i][j][k].C[2];
	}
	if(total!=0)
	{
		*pxOff = sumX/total;
		*pyOff = sumY/total;
		*pzOff = sumZ/total;
	}
	else
	{
		*pxOff = 0;
		*pyOff = 0;
		*pzOff = 0;
	}
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * MEP grid from a multipole expansion of an electron-density grid, plus the
 * nuclear point charges: V(r) = sum_lm Q_lm Zlm(r^)/r^(l+1) + sum_n Z_n/|r-R_n|.
 * The expansion center is the density centroid (getCOff). Prints the raw
 * moments to stdout. Returns a newly allocated grid (caller frees), or NULL
 * if the input is not an all-positive density grid, the moments could not be
 * computed, or the user cancelled.
 * Fix: removed a dead store of grid->...C[3] into 'temp' (overwritten before
 * any use by temp = pow(invR,l+1)).
 */
Grid* compute_mep_grid_using_multipol_from_density_grid(Grid* grid, gint lmax)
{
gint i;
gint j;
gint k;
gint l;
gint m;
gdouble x;
gdouble y;
gdouble z;
gdouble r;
gdouble temp;
gdouble PRECISION = 1e-13;
Grid* esp = NULL;
gdouble** Q = NULL;
gdouble invR = 1.0;
gdouble v;
Zlm** slm = NULL;
gint n;
gboolean beg = TRUE;
gdouble scale;
gdouble xOff=0, yOff=0, zOff=0;
if(!test_grid_all_positive(grid))
{
Message(_("Sorry\n The current grid is not a grid for electronic density"),_("Error"),TRUE);
return NULL;
}
getCOff(grid,&xOff, &yOff, &zOff);
Q = compute_multipol_from_grid(grid,lmax, xOff, yOff, zOff);
if(!Q) return NULL; /* cancelled inside the moment computation */
esp = grid_point_alloc(grid->N,grid->limits);
/* precompute the real solid harmonics coefficients */
slm = g_malloc((lmax+1)*sizeof(Zlm*));
for(l=0;l<=lmax;l++)
{
slm[l] = g_malloc((2*l+1)*sizeof(Zlm));
for(m=-l;m<=l;m++)
slm[l][l+m]=getZlm(l,m);
}
printf("Electronic values. All values in AU\n");
printf("Center %f %f %f\n", xOff, yOff, zOff);
/* print normalized moments, then fold in the 4pi/(2l+1) expansion factor */
for(l=0; l<=lmax; l++)
for(m=-l; m<=l; m++)
{
unsigned int absm = abs(m);
gdouble Norm = 1;
Norm = sqrt((2*l+1)/(4*PI))*sqrt(factorial(l+absm)*factorial(l-absm))/factorial(l)/pow(2.0,absm);
if(m!=0) Norm *= sqrt(2.0);
printf("Q[%d][%d] = %lf\n",l,m,Q[l][m+l]/Norm);
Q[l][m+l] *= 4*PI/(2*l+1);
}
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
scale = (gdouble)1.01/grid->N[0];
for(i=0;i<grid->N[0];i++)
{
for(j=0;j<grid->N[1];j++)
{
for(k=0;k<grid->N[2];k++)
{
x = grid->point[i][j][k].C[0];
y = grid->point[i][j][k].C[1];
z = grid->point[i][j][k].C[2];
esp->point[i][j][k].C[0] = x;
esp->point[i][j][k].C[1] = y;
esp->point[i][j][k].C[2] = z;
/* position relative to the expansion center */
x -=xOff;
y -=yOff;
z -=zOff;
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
/* unit direction for the Zlm evaluation */
x *= invR;
y *= invR;
z *= invR;
/* electronic part: multipole expansion */
v = 0;
for(l=0; l<=lmax; l++)
{
temp = pow(invR,l+1);
for(m=-l; m<=l; m++)
{
if(fabs(Q[l][m+l])<10*PRECISION) continue; /* skip negligible moments */
v += temp*getValueZlm(&slm[l][m+l],x,y,z)*Q[l][m+l];
}
}
/* nuclear part: point charges */
for(n=0;n<Ncenters;n++)
{
x = grid->point[i][j][k].C[0]-GeomOrb[n].C[0];
y = grid->point[i][j][k].C[1]-GeomOrb[n].C[1];
z = grid->point[i][j][k].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].nuclearCharge;
}
esp->point[i][j][k].C[3]=v;
/* track min/max of the potential for the grid limits */
if(beg)
{
beg = FALSE;
esp->limits.MinMax[0][3] = v;
esp->limits.MinMax[1][3] = v;
}
else
{
if(esp->limits.MinMax[0][3]>v) esp->limits.MinMax[0][3] = v;
if(esp->limits.MinMax[1][3]<v) esp->limits.MinMax[1][3] = v;
}
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPMEPGRID,FALSE);
}
if(Q)
{
for(l=0;l<=lmax;l++)
if(Q[l])g_free(Q[l]);
g_free(Q);
}
if(slm)
{
for(l=0;l<=lmax;l++)
if(slm[l])g_free(slm[l]);
g_free(slm);
}
if(CancelCalcul)
{
esp = free_grid(esp);
}
return esp;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * MEP grid via multipole expansion, starting from the orbitals: first builds
 * an electron-density grid, then expands it. Returns the MEP grid (caller
 * frees), or NULL on failure/cancellation.
 * Fix: check define_grid_point's result before using it (the Poisson sibling
 * solve_poisson_equation_from_orbitals already does this).
 */
Grid* compute_mep_grid_using_multipol_from_orbitals(gint N[],GridLimits limits, gint lmax)
{
Grid* eGrid = NULL;
Grid* esp = NULL;
TypeGrid = GABEDIT_TYPEGRID_EDENSITY;
eGrid = define_grid_point(N,limits,get_value_electronic_density);
if(!eGrid) return NULL; /* density grid failed or was cancelled */
esp = compute_mep_grid_using_multipol_from_density_grid(eGrid, lmax);
eGrid=free_grid(eGrid);
set_status_label_info(_("Grid")," ");
return esp;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * Electrostatic potential by solving the Poisson equation for the density in
 * 'grid' (conjugate-gradient or multigrid, per psMethod), then adding the
 * nuclear Coulomb terms. Returns a newly allocated grid (caller frees), or
 * NULL on bad input, even interior point counts, or user cancellation.
 * Fix: the NULL check on 'grid' now comes first — it used to run AFTER
 * test_grid_all_positive(grid) and just before grid->N dereferences, so a
 * NULL argument would have been dereferenced.
 */
Grid* solve_poisson_equation_from_density_grid(Grid* grid, PoissonSolverMethod psMethod)
{
gint i;
gint j;
gint k;
Grid* esp = NULL;
DomainMG domain;
gdouble xL;
gdouble yL;
gdouble zL;
GridMG* source = NULL;
GridMG* potential = NULL;
gdouble fourPI = -4*PI; /* Poisson source scaling: lap V = -4 pi rho */
PoissonMG* ps= NULL;
gint Nx, Ny, Nz;
LaplacianOrderMG laplacianOrder= GABEDIT_LAPLACIAN_2;
/* LaplacianOrderMG laplacianOrder= GABEDIT_LAPLACIAN_4;*/
gdouble PRECISION = 1e-13;
if(!grid) return NULL; /* must precede any dereference of grid */
if(!test_grid_all_positive(grid))
{
Message(_("Sorry\n The current grid is not a grid for electronic density"),_("Error"),TRUE);
return NULL;
}
/* interior point counts for the chosen Laplacian stencil */
Nx = grid->N[0]-laplacianOrder;
Ny = grid->N[1]-laplacianOrder;
Nz = grid->N[2]-laplacianOrder;
if(Nx%2==0 || Ny%2==0 || Nz%2==0)
{
printf("The number of step should be odd\n");
return NULL;
}
/* NOTE(review): box geometry uses the file-global 'limits', not grid->limits
 * — confirm both are kept consistent by the callers */
xL = fabs(limits.MinMax[1][0]-limits.MinMax[0][0]);
yL = fabs(limits.MinMax[1][1]-limits.MinMax[0][1]);
zL = fabs(limits.MinMax[1][2]-limits.MinMax[0][2]);
domain = getDomainMG(Nx,Ny,Nz,
limits.MinMax[0][0], limits.MinMax[0][1], limits.MinMax[0][2],
xL, yL, zL, laplacianOrder);
/* printDomain(&domain);*/
source = getNewGridMGUsingDomain(&domain);
potential = getNewGridMGUsingDomain(&domain);
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
setTextInProgress(_("Compute of the source grid for the Poisson equation"));
/* source term: -4 pi rho */
for(i=0;i<grid->N[0];i++)
for(j=0;j<grid->N[1];j++)
for(k=0;k<grid->N[2];k++)
{
setValGridMG(source,i,j,k,grid->point[i][j][k].C[3]*fourPI);
}
ps = getPoissonMG(potential, source);
/*
ps->condition=GABEDIT_CONDITION_EWALD;
ps->condition=GABEDIT_CONDITION_CLUSTER;
ps->condition=GABEDIT_CONDITION_PERIODIC;
*/
if(ps->condition==GABEDIT_CONDITION_EWALD) setTextInProgress(_("Set boundary values from EWALD "));
else if(ps->condition==GABEDIT_CONDITION_CLUSTER) setTextInProgress(_("Set boundary values to 0 "));
else if(ps->condition==GABEDIT_CONDITION_PERIODIC) setTextInProgress(_("Periodic boundary conditions "));
else setTextInProgress(_("Set boundary values from multipole "));
tradesBoundaryPoissonMG(ps);
setTextInProgress(_("Solve the Poisson equation"));
/* solve poisson */
/*solveMGPoissonMG(ps, domain.maxLevel);*/
if(psMethod==GABEDIT_CG)
solveCGPoissonMG(ps, 2000, 1e-6);
else
solveMGPoissonMG3(ps, domain.maxLevel, 1000, 1e-6, 0);
if(CancelCalcul)
{
destroyPoissonMG(ps); /* destroy of source and potential Grid */
esp = free_grid(esp);
return NULL;
}
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
setTextInProgress(_("End the resolution of the Poisson equation"));
/*smootherPoissonMG(ps,100);*/
/* total MEP = nuclear Coulomb - electronic potential */
esp = copyGrid(grid);
for(i=0;i<esp->N[0];i++)
for(j=0;j<esp->N[1];j++)
for(k=0;k<esp->N[2];k++)
{
gdouble v = 0;
gint n;
gdouble x,y,z,r,invR;
for(n=0;n<Ncenters;n++)
{
x = esp->point[i][j][k].C[0]-GeomOrb[n].C[0];
y = esp->point[i][j][k].C[1]-GeomOrb[n].C[1];
z = esp->point[i][j][k].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].nuclearCharge;
}
esp->point[i][j][k].C[3] = v-getValGridMG(ps->potential, i, j, k);
}
destroyPoissonMG(ps); /* destroy of source and potential Grid */
reset_limits_for_grid(esp);
return esp;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * MEP via the Poisson equation, starting from the orbitals: builds an
 * electron-density grid, then solves for the potential with the chosen
 * solver. Returns the MEP grid (caller frees) or NULL on failure.
 */
Grid* solve_poisson_equation_from_orbitals(gint N[],GridLimits limits, PoissonSolverMethod psMethod)
{
	Grid* densityGrid = NULL;
	Grid* mep = NULL;

	TypeGrid = GABEDIT_TYPEGRID_EDENSITY;
	densityGrid = define_grid_point(N,limits,get_value_electronic_density);
	/* record which MEP variant is being produced */
	TypeGrid = (psMethod == GABEDIT_CG) ? GABEDIT_TYPEGRID_MEP_CG : GABEDIT_TYPEGRID_MEP_MG;
	if(!densityGrid) return NULL;
	mep = solve_poisson_equation_from_density_grid(densityGrid, psMethod);
	densityGrid = free_grid(densityGrid);
	set_status_label_info(_("Grid")," ");
	return mep;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * "Exact" MEP grid: analytic electron-repulsion integrals over the Gaussian
 * basis (get_value_electrostatic_potential) plus nuclear Coulomb terms, on a
 * regular grid built from N[] and 'limits'. Only available for Gaussian basis
 * functions (AOrb must be set). Returns a newly allocated grid (caller frees)
 * or NULL on error/cancellation.
 * Fix: g_free(XkXl) was inside the #ifndef G_OS_WIN32 block, so the per-slab
 * scratch buffer leaked on every outer iteration on Windows; it is now freed
 * unconditionally.
 */
Grid* compute_mep_grid_exact(gint N[],GridLimits limits)
{
gint i;
Grid* esp = NULL;
gboolean beg = TRUE;
gdouble scale;
gdouble V0[3];
gdouble V1[3];
gdouble V2[3];
gdouble firstPoint[3];
if(!AOrb)
{
Message(_("Sorry\n This option is implemented only for Gaussian Basis Function"),_("Error"),TRUE);
return NULL;
}
esp = grid_point_alloc(N,limits);
/* box span vectors along the three grid directions */
for(i=0;i<3;i++)
{
V0[i] = firstDirection[i] *(esp->limits.MinMax[1][0]-esp->limits.MinMax[0][0]);
V1[i] = secondDirection[i]*(esp->limits.MinMax[1][1]-esp->limits.MinMax[0][1]);
V2[i] = thirdDirection[i] *(esp->limits.MinMax[1][2]-esp->limits.MinMax[0][2]);
}
for(i=0;i<3;i++)
{
firstPoint[i] = V0[i] + V1[i] + V2[i];
/* firstPoint[i] = originOfCube[i] - firstPoint[i]/2;*/
firstPoint[i] = limits.MinMax[0][i];
}
/* convert spans into per-step increments; assumes N[.] >= 2 */
for(i=0;i<3;i++)
{
V0[i] /= esp->N[0]-1;
V1[i] /= esp->N[1]-1;
V2[i] /= esp->N[2]-1;
}
#ifndef G_OS_WIN32
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
#endif
scale = (gdouble)1.01/esp->N[0];
#ifdef ENABLE_OMP
/*#pragma omp parallel for private(i)*/
#endif
for(i=0;i<esp->N[0];i++)
{
gint j;
gint k;
gdouble x;
gdouble y;
gdouble z;
gdouble r;
gdouble PRECISION = 1e-13;
gdouble invR = 1.0;
gdouble v;
gint n;
/* scratch buffer for pair products, one per outer slab */
gdouble* XkXl = g_malloc(NAOrb*(NAOrb+1)/2*sizeof(gdouble));
if(!CancelCalcul)
for(j=0;j<esp->N[1];j++)
{
for(k=0;k<esp->N[2];k++)
{
x = firstPoint[0] + i*V0[0] + j*V1[0] + k*V2[0];
y = firstPoint[1] + i*V0[1] + j*V1[1] + k*V2[1];
z = firstPoint[2] + i*V0[2] + j*V1[2] + k*V2[2];
esp->point[i][j][k].C[0] = x;
esp->point[i][j][k].C[1] = y;
esp->point[i][j][k].C[2] = z;
/* electronic part from analytic integrals */
v = 0;
v = get_value_electrostatic_potential( x, y, z, XkXl);
/* nuclear part */
for(n=0;n<Ncenters;n++)
{
x = esp->point[i][j][k].C[0]-GeomOrb[n].C[0];
y = esp->point[i][j][k].C[1]-GeomOrb[n].C[1];
z = esp->point[i][j][k].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].nuclearCharge;
}
esp->point[i][j][k].C[3]=v;
}
}
#ifdef ENABLE_OMP
/*#pragma omp critical*/
#endif
g_free(XkXl); /* always free — used to be skipped on Windows (leak) */
#ifndef G_OS_WIN32
progress_orb(scale,GABEDIT_PROGORB_COMPMEPGRID,FALSE);
#endif
}
if(CancelCalcul)
progress_orb(0,GABEDIT_PROGORB_COMPMEPGRID,TRUE);
/* second pass: min/max of the potential for the grid limits */
if(!CancelCalcul)
for(i=0;i<esp->N[0];i++)
{
gint j;
gint k;
gdouble v;
for(j=0;j<esp->N[1];j++)
{
for(k=0;k<esp->N[2];k++)
{
v = esp->point[i][j][k].C[3];
if(beg)
{
beg = FALSE;
esp->limits.MinMax[0][3] = v;
esp->limits.MinMax[1][3] = v;
}
else
{
if(esp->limits.MinMax[0][3]>v) esp->limits.MinMax[0][3] = v;
if(esp->limits.MinMax[1][3]<v) esp->limits.MinMax[1][3] = v;
}
}
}
}
if(CancelCalcul)
{
esp = free_grid(esp);
}
return esp;
}
/*********************************************************************************/
/*********************************************************************************/
/*
 * Thin wrapper: compute the exact MEP grid from the orbitals and clear the
 * grid status label afterwards. Returns the grid (caller frees) or NULL.
 */
Grid* compute_mep_grid_using_orbitals(gint N[],GridLimits limits)
{
	Grid* mepGrid = NULL;

	TypeGrid = GABEDIT_TYPEGRID_EDENSITY;
	mepGrid = compute_mep_grid_exact(N, limits);
	set_status_label_info(_("Grid")," ");
	return mepGrid;
}
/**************************************************************/
/**************************************************************/
/*
 * Coulomb integral (ii|jj) between the densities of two orbitals, via the
 * Poisson equation: squares both orbital grids, solves for the potential of
 * phi_i^2, and integrates it against phi_j^2. Also reports the two norms and
 * the overlap <i|j>. Outputs are set through the pointer parameters; all are
 * initialized to -1 so a FALSE return leaves sentinel values.
 * Returns FALSE on grid failure or user cancellation.
 * Fix: early returns used to leak gridi (and gridj) — they are now freed on
 * every exit path.
 */
gboolean compute_coulomb_integrale_iijj_poisson(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNorm, gdouble* pNormj, gdouble* pOverlap)
{
Grid *gridi = NULL;
Grid *gridj = NULL;
Grid *potential = NULL;
gint k,l,m;
gdouble scale;
gdouble norm = 0;
gdouble normj = 0;
gdouble overlap = 0;
gdouble xx,yy,zz;
gdouble integ = 0;
gdouble dv = 0;
gdouble PRECISION = 1e-10;
*pInteg = -1;
*pNorm = -1;
*pNormj = -1;
*pOverlap = -1;
gridi = define_grid_orb(N, limits, typeOrbi, i);
if(!gridi) return FALSE;
if(CancelCalcul)
{
free_grid(gridi); /* was leaked */
return FALSE;
}
gridj = 0;
if(i==j) gridj = copyGrid(gridi);
else gridj = define_grid_orb(N, limits, typeOrbj, j);
if(!gridj)
{
free_grid(gridi); /* was leaked */
return FALSE;
}
if(CancelCalcul)
{
free_grid(gridi); /* was leaked */
free_grid(gridj); /* was leaked */
return FALSE;
}
set_status_label_info(_("Grid"),_("Comp. phi_i^2 and phi_j^2"));
scale = (gdouble)1.01/gridi->N[0];
/* accumulate overlap/norms, and square both grids in place */
for(k=0;k<gridi->N[0];k++)
{
for(l=0;l<gridi->N[1];l++)
{
for(m=0;m<gridi->N[2];m++)
{
overlap += gridi->point[k][l][m].C[3]*gridj->point[k][l][m].C[3];
gridi->point[k][l][m].C[3] = gridi->point[k][l][m].C[3]* gridi->point[k][l][m].C[3];
gridj->point[k][l][m].C[3] = gridj->point[k][l][m].C[3]* gridj->point[k][l][m].C[3];
norm += gridi->point[k][l][m].C[3];
normj += gridj->point[k][l][m].C[3];
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul)
{
free_grid(gridi);
free_grid(gridj);
return FALSE;
}
set_status_label_info(_("Grid"),_("Computing of Coulomb int."));
/* potential of phi_i^2 (includes the nuclear terms, subtracted below) */
potential = solve_poisson_equation_from_density_grid(gridi, GABEDIT_MG);
if(CancelCalcul || !potential)
{
free_grid(gridi);
free_grid(gridj);
if(potential) free_grid(potential);
return FALSE;
}
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
for(k=0;k<gridi->N[0];k++)
{
for(l=0;l<gridi->N[1];l++)
for(m=0;m<gridi->N[2];m++)
{
gdouble v = 0;
gint n;
gdouble x,y,z,r,invR;
/* rebuild the nuclear contribution to strip it from the MEP */
for(n=0;n<Ncenters;n++)
{
x = potential->point[k][l][m].C[0]-GeomOrb[n].C[0];
y = potential->point[k][l][m].C[1]-GeomOrb[n].C[1];
z = potential->point[k][l][m].C[2]-GeomOrb[n].C[2];
r = sqrt(x*x + y*y + z*z+PRECISION);
invR = 1.0 /r;
v+= invR*GeomOrb[n].nuclearCharge;
}
integ += -(potential->point[k][l][m].C[3]-v)*gridj->point[k][l][m].C[3];
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPINTEG,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
/* volume element from the grid spacing */
xx = gridi->point[1][0][0].C[0]-gridi->point[0][0][0].C[0];
yy = gridi->point[0][1][0].C[1]-gridi->point[0][0][0].C[1];
zz = gridi->point[0][0][1].C[2]-gridi->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
free_grid(gridi);
free_grid(gridj);
free_grid(potential);
if(CancelCalcul) return FALSE;
*pInteg = integ*dv;
*pNorm = norm*dv;
*pNormj = normj*dv;
*pOverlap = overlap*dv;
return TRUE;
}
/******************************************************************************************************************/
/******************************************************************************************************************/
/*
 * Numeric transition dipole matrix elements <i|r|j> between two orbitals on a
 * grid. pInteg[0..2] receive the x,y,z components; norms and the overlap are
 * also returned. Outputs are initialized to sentinels so FALSE leaves them
 * well-defined. Returns FALSE on grid failure or user cancellation.
 * Fix: the '!gridj' early return used to leak gridi — it is now freed.
 */
gboolean compute_transition_matrix_numeric(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap)
{
Grid *gridi = NULL;
Grid *gridj = NULL;
gint ki,li,mi;
gdouble scale;
gdouble normi = 0;
gdouble normj = 0;
gdouble overlap = 0;
gdouble xx,yy,zz;
gdouble dv = 0;
pInteg[0] = 0;
pInteg[1] = 0;
pInteg[2] = 0;
*pNormi = -1;
*pNormj = -1;
*pOverlap = -1;
gridi = define_grid_orb(N, limits, typeOrbi, i);
if(!gridi) return FALSE;
gridj = 0;
gridj = define_grid_orb(N, limits, typeOrbj, j);
if(!gridj)
{
free_grid(gridi); /* was leaked */
return FALSE;
}
set_status_label_info(_("Grid"),_("Comp. phi_i*phi_j"));
scale = (gdouble)1.01/gridi->N[0];
/* accumulate norms/overlap; replace gridi's values with phi_i*phi_j */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
{
for(mi=0;mi<gridi->N[2];mi++)
{
overlap += gridi->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
normi += gridi->point[ki][li][mi].C[3]*gridi->point[ki][li][mi].C[3];
normj += gridj->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
gridi->point[ki][li][mi].C[3] = gridi->point[ki][li][mi].C[3]* gridj->point[ki][li][mi].C[3];
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul)
{
free_grid(gridi);
free_grid(gridj);
return FALSE;
}
set_status_label_info(_("Grid"),_("Computing of <i|vec r|j>."));
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* integrate r * phi_i*phi_j */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
for(mi=0;mi<gridi->N[2];mi++)
{
xx = gridi->point[ki][li][mi].C[0];
yy = gridi->point[ki][li][mi].C[1];
zz = gridi->point[ki][li][mi].C[2];
pInteg[0] += xx*gridi->point[ki][li][mi].C[3];
pInteg[1] += yy*gridi->point[ki][li][mi].C[3];
pInteg[2] += zz*gridi->point[ki][li][mi].C[3];
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* volume element from the grid spacing */
xx = gridi->point[1][0][0].C[0]-gridi->point[0][0][0].C[0];
yy = gridi->point[0][1][0].C[1]-gridi->point[0][0][0].C[1];
zz = gridi->point[0][0][1].C[2]-gridi->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
free_grid(gridi);
free_grid(gridj);
if(CancelCalcul) return FALSE;
pInteg[0] *= dv;
pInteg[1] *= dv;
pInteg[2] *= dv;
*pNormi = normi*dv;
*pNormj = normj*dv;
*pOverlap = overlap*dv;
return TRUE;
}
/******************************************************************************************************************/
/******************************************************************************************************************/
/*
 * Numeric spatial overlap < |phi_i| | |phi_j| > between two orbitals on a
 * grid, plus the two norms and the signed overlap <i|j>. Outputs are
 * initialized to sentinels so FALSE leaves them well-defined.
 * Returns FALSE on grid failure or user cancellation.
 * Fix: the '!gridj' early return used to leak gridi — it is now freed.
 */
gboolean compute_spatial_overlapij_numeric(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap)
{
Grid *gridi = NULL;
Grid *gridj = NULL;
gint ki,li,mi;
gdouble scale;
gdouble normi = 0;
gdouble normj = 0;
gdouble overlap = 0;
gdouble xx,yy,zz;
gdouble dv = 0;
*pInteg = 0;
*pNormi = -1;
*pNormj = -1;
*pOverlap = -1;
gridi = define_grid_orb(N, limits, typeOrbi, i);
if(!gridi) return FALSE;
gridj = 0;
gridj = define_grid_orb(N, limits, typeOrbj, j);
if(!gridj)
{
free_grid(gridi); /* was leaked */
return FALSE;
}
set_status_label_info(_("Grid"),_("Comp. |phi_i*phi_j|"));
scale = (gdouble)1.01/gridi->N[0];
/* accumulate norms/overlap; replace gridi's values with |phi_i*phi_j| */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
{
for(mi=0;mi<gridi->N[2];mi++)
{
overlap += gridi->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
normi += gridi->point[ki][li][mi].C[3]*gridi->point[ki][li][mi].C[3];
normj += gridj->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
gridi->point[ki][li][mi].C[3] = fabs(gridi->point[ki][li][mi].C[3]* gridj->point[ki][li][mi].C[3]);
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul)
{
free_grid(gridi);
free_grid(gridj);
return FALSE;
}
set_status_label_info(_("Grid"),_("Computing of < |i| | |j| >."));
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* integrate |phi_i*phi_j| */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
for(mi=0;mi<gridi->N[2];mi++)
{
*pInteg += gridi->point[ki][li][mi].C[3];
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* volume element from the grid spacing */
xx = gridi->point[1][0][0].C[0]-gridi->point[0][0][0].C[0];
yy = gridi->point[0][1][0].C[1]-gridi->point[0][0][0].C[1];
zz = gridi->point[0][0][1].C[2]-gridi->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
free_grid(gridi);
free_grid(gridj);
if(CancelCalcul) return FALSE;
*pInteg *= dv;
*pNormi = normi*dv;
*pNormj = normj*dv;
*pOverlap = overlap*dv;
return TRUE;
}
/******************************************************************************************************************/
/******************************************************************************************************************/
/*
 * Numeric integral of (phi_i*phi_j)^2 over the grid, plus the two norms and
 * the overlap <i|j>. Outputs are initialized to sentinels so FALSE leaves
 * them well-defined. Returns FALSE on grid failure or user cancellation.
 * Fix: the '!gridj' early return used to leak gridi — it is now freed.
 * NOTE(review): the status message reads "Computing of <i|vec r|j>." but the
 * quantity computed is integ (phi_i phi_j)^2 dv — presumably copy/paste from
 * compute_transition_matrix_numeric; confirm the intended wording.
 */
gboolean compute_spatial_overlapiijj_numeric(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap)
{
Grid *gridi = NULL;
Grid *gridj = NULL;
gint ki,li,mi;
gdouble scale;
gdouble normi = 0;
gdouble normj = 0;
gdouble overlap = 0;
gdouble xx,yy,zz;
gdouble dv = 0;
*pInteg = 0;
*pNormi = -1;
*pNormj = -1;
*pOverlap = -1;
gridi = define_grid_orb(N, limits, typeOrbi, i);
if(!gridi) return FALSE;
gridj = 0;
gridj = define_grid_orb(N, limits, typeOrbj, j);
if(!gridj)
{
free_grid(gridi); /* was leaked */
return FALSE;
}
set_status_label_info(_("Grid"),_("Comp. phi_i*phi_j"));
scale = (gdouble)1.01/gridi->N[0];
/* accumulate norms/overlap; replace gridi's values with phi_i*phi_j */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
{
for(mi=0;mi<gridi->N[2];mi++)
{
overlap += gridi->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
normi += gridi->point[ki][li][mi].C[3]*gridi->point[ki][li][mi].C[3];
normj += gridj->point[ki][li][mi].C[3]*gridj->point[ki][li][mi].C[3];
gridi->point[ki][li][mi].C[3] = gridi->point[ki][li][mi].C[3]* gridj->point[ki][li][mi].C[3];
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul)
{
free_grid(gridi);
free_grid(gridj);
return FALSE;
}
set_status_label_info(_("Grid"),_("Computing of <i|vec r|j>."));
scale = (gdouble)1.01/gridi->N[0];
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* integrate (phi_i*phi_j)^2 */
for(ki=0;ki<gridi->N[0];ki++)
{
for(li=0;li<gridi->N[1];li++)
for(mi=0;mi<gridi->N[2];mi++)
{
*pInteg += gridi->point[ki][li][mi].C[3]*gridi->point[ki][li][mi].C[3];
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
/* volume element from the grid spacing */
xx = gridi->point[1][0][0].C[0]-gridi->point[0][0][0].C[0];
yy = gridi->point[0][1][0].C[1]-gridi->point[0][0][0].C[1];
zz = gridi->point[0][0][1].C[2]-gridi->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
free_grid(gridi);
free_grid(gridj);
if(CancelCalcul) return FALSE;
*pInteg *= dv;
*pNormi = normi*dv;
*pNormj = normj*dv;
*pOverlap = overlap*dv;
return TRUE;
}
/**************************************************************/
/**************************************************************/
/*
 * Riemann-sum integral of f (or f^2 when 'square') over the whole grid;
 * result in *pInteg. Returns FALSE if grid is NULL or the user cancelled.
 * Fix: status-message typo "f(,xy,z)" corrected to "f(x,y,z)".
 */
gboolean compute_integrale_from_grid(Grid* grid, gboolean square, gdouble* pInteg)
{
gint k,l,m;
gdouble scale;
gdouble integ = 0;
gdouble dv = 0;
gdouble xx,yy,zz;
if(!grid) return FALSE;
if(CancelCalcul) return FALSE;
if(square) set_status_label_info(_("Grid"),_("Comp. integ f^2(x,y,z) dv from grid"));
else set_status_label_info(_("Grid"),_("Comp. integ f(x,y,z) dv from grid"));
scale = (gdouble)1.01/grid->N[0];
for(k=0;k<grid->N[0];k++)
{
for(l=0;l<grid->N[1];l++)
{
for(m=0;m<grid->N[2];m++)
{
if(square) integ += grid->point[k][l][m].C[3]*grid->point[k][l][m].C[3];
else integ += grid->point[k][l][m].C[3];
}
}
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
break;
}
progress_orb(scale,GABEDIT_PROGORB_COMPGRID,FALSE);
}
progress_orb(0,GABEDIT_PROGORB_COMPGRID,TRUE);
if(CancelCalcul) return FALSE;
/* volume element from the grid spacing (assumes a uniform grid) */
xx = grid->point[1][0][0].C[0]-grid->point[0][0][0].C[0];
yy = grid->point[0][1][0].C[1]-grid->point[0][0][0].C[1];
zz = grid->point[0][0][1].C[2]-grid->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
*pInteg = integ*dv;
return TRUE;
}
/*************************************************************************************/
/*************************************************************************************/
/*
 * Riemann-sum integral of the grid values over the whole box; result in
 * *pInteg. Silent variant (no progress bar). Returns FALSE if grid is NULL
 * or the user cancelled during or before the sweep.
 */
gboolean compute_integrale_from_grid_all_space(Grid* grid, gdouble* pInteg)
{
	gint i, j, k;
	gdouble sum = 0;
	gdouble dx, dy, dz;

	if(!grid) return FALSE;
	if(CancelCalcul) return FALSE;
	for(i=0;i<grid->N[0];i++)
	{
		for(j=0;j<grid->N[1];j++)
		{
			for(k=0;k<grid->N[2];k++)
				sum += grid->point[i][j][k].C[3];
			if(CancelCalcul) return FALSE; /* checked per row */
		}
	}
	if(CancelCalcul) return FALSE;
	/* volume element from the grid spacing (assumes a uniform grid) */
	dx = grid->point[1][0][0].C[0]-grid->point[0][0][0].C[0];
	dy = grid->point[0][1][0].C[1]-grid->point[0][0][0].C[1];
	dz = grid->point[0][0][1].C[2]-grid->point[0][0][0].C[2];
	*pInteg = sum*fabs(dx*dy*dz);
	return TRUE;
}
/**************************************************************/
/**************************************************************/
/*
 * Integral of f (or f^2 when 'square') restricted to points at or above the
 * isovalue: plain values are compared directly, squared mode compares |f|.
 * Result in *pInteg. Returns FALSE if grid is NULL or the user cancelled.
 * Fix: removed the unused local 'scale' (computed but never used — this
 * variant reports no progress).
 */
gboolean compute_integrale_from_grid_foranisovalue(Grid* grid, gboolean square, gdouble isovalue, gdouble* pInteg)
{
gint k,l,m;
gdouble integ = 0;
gdouble dv = 0;
gdouble xx,yy,zz;
if(!grid) return FALSE;
if(CancelCalcul) return FALSE;
for(k=0;k<grid->N[0];k++)
{
for(l=0;l<grid->N[1];l++)
{
for(m=0;m<grid->N[2];m++)
{
if(!square && grid->point[k][l][m].C[3]<isovalue) continue;
if(square && fabs(grid->point[k][l][m].C[3])<isovalue) continue;
if(square) integ += grid->point[k][l][m].C[3]*grid->point[k][l][m].C[3];
else integ += grid->point[k][l][m].C[3];
}
if(CancelCalcul) return FALSE;
}
}
if(CancelCalcul) return FALSE;
/* volume element from the grid spacing (assumes a uniform grid) */
xx = grid->point[1][0][0].C[0]-grid->point[0][0][0].C[0];
yy = grid->point[0][1][0].C[1]-grid->point[0][0][0].C[1];
zz = grid->point[0][0][1].C[2]-grid->point[0][0][0].C[2];
dv = fabs(xx*yy*zz);
*pInteg = integ*dv;
return TRUE;
}
/**************************************************************/
/**************************************************************/
/*
 * Bisection search for the isovalue whose restricted integral equals the
 * requested percentage of the total grid integral. Result in *pIsovalue.
 * percent is clamped to [0,100]; precision below 1e-10 falls back to 1e-3.
 * Returns FALSE if grid is NULL, the total integral is negligible, or the
 * user cancelled.
 * Fix: status-message typo "f(,xy,z)" corrected to "f(x,y,z)".
 * NOTE(review): if fabs(isoMax) <= precision initially, the loop never runs
 * and *pIsovalue is 0 — confirm this degenerate case is acceptable.
 */
gboolean compute_isovalue_percent_from_grid(Grid* grid, gboolean square, gdouble percent, gdouble precision, gdouble* pIsovalue)
{
gdouble integAll = 0;
gdouble integ = 0;
gdouble isoMin = 0;
gdouble isoMax = 0;
gdouble iso = 0;
gchar tmp[BSIZE];
if(!grid) return FALSE;
if(CancelCalcul) return FALSE;
if(percent>100) percent = 100;
if(percent<0) percent = 0;
if(precision<1e-10) precision = 1e-3;
if(!compute_integrale_from_grid(grid, square, &integAll)) return FALSE;
/* printf("integAll = %f\n",integAll);*/
if(integAll<1e-10) return FALSE;
if(square) set_status_label_info(_("Grid"),_("Comp. integ f^2(x,y,z) dv from grid"));
else set_status_label_info(_("Grid"),_("Comp. integ f(x,y,z) dv from grid"));
isoMax = fabs(limits.MinMax[1][3]);
/*
printf("isoMin = %f\n",isoMin);
printf("isoMax = %f\n",isoMax);
*/
/* bisection: the restricted integral decreases as iso grows */
while(fabs(isoMax-isoMin)>precision)
{
iso = (isoMax+isoMin)/2;
sprintf(tmp,_("Computing integrale for isovalue = %f, (IsoMax-IsoMin) = %f, precision = %f"),iso, fabs(isoMax-isoMin),precision);
set_status_label_info(_("Grid"),tmp);
if(!compute_integrale_from_grid_foranisovalue(grid, square, iso, &integ)) return FALSE;
/* printf("iso = %f %%=%f\n",iso,integ/integAll*100);*/
if(integ/integAll*100<percent) isoMax = iso;
else isoMin = iso;
}
if(CancelCalcul) return FALSE;
*pIsovalue = iso;
return TRUE;
}
/*********************************************************************************************************************************/
/*
 * Return the middle eigenvalue (lambda_2) of the Hessian of the grid
 * scalar field at node (i,j,k), built by finite differences.
 *   fcx/fcy/fcz    : first-derivative stencil coefficients (per axis)
 *   lfcx/lfcy/lfcz : second-derivative stencil coefficients (per axis)
 *   nBoundary      : stencil half-width; (i,j,k) must be at least
 *                    nBoundary nodes away from every grid edge
 * NOTE(review): assumes C[3] holds the scalar field, as in the other
 * grid routines in this file. The static eigenvector workspace `eigv`
 * is allocated once and intentionally never freed; this also makes the
 * function non-reentrant/thread-unsafe.
 */
gdouble getLambda2(Grid* grid, gint i, gint j, gint k, gdouble* fcx, gdouble* fcy, gdouble* fcz, gdouble* lfcx, gdouble* lfcy, gdouble* lfcz, gint nBoundary)
{
	gint n,kn, nn, knn;
	gdouble xx,yy,zz,xy,xz,yz,g;
	gdouble tensor[6];
	gdouble d[3];
	static gdouble** eigv = NULL;
	/* lazily allocate the 3x3 eigenvector matrix on first call */
	if(eigv==NULL)
	{
		eigv = g_malloc(3*sizeof(gdouble*));
		for(n=0 ; n<3 ; n++) eigv[n] = g_malloc(3*sizeof(gdouble));
	}
	/* diagonal Hessian elements: d2f/dx2, d2f/dy2, d2f/dz2 via the
	 * symmetric second-derivative stencil */
	xx = lfcx[0]*grid->point[i][j][k].C[3];
	yy = lfcy[0]*grid->point[i][j][k].C[3];
	zz = lfcz[0]*grid->point[i][j][k].C[3];
	for(n=1;n<=nBoundary;n++)
	{
		xx += lfcx[n] *(grid->point[i-n][j][k].C[3]+grid->point[i+n][j][k].C[3]);
		yy += lfcy[n] *(grid->point[i][j-n][k].C[3]+grid->point[i][j+n][k].C[3]);
		zz += lfcz[n] *(grid->point[i][j][k-n].C[3]+grid->point[i][j][k+n].C[3]);
	}
	/* Off-diagonal elements: nested first-derivative stencils.
	 * Outer loop runs n over -nBoundary..-1 (kn counts iterations);
	 * the i+n / i-n pairs inside cover both sides of the node. */
	xy = 0;
	xz = 0;
	yz = 0;
	for(n=-nBoundary, kn=0 ; kn<nBoundary ; n++, kn++)
	{
		/* grad_y f at x-offset +n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcy[knn] * (grid->point[i+n][j+nn][k].C[3]-grid->point[i+n][j-nn][k].C[3]);
		xy += fcx[kn] * g;
		/* grad_y f at x-offset -n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcy[knn] * (grid->point[i-n][j+nn][k].C[3]-grid->point[i-n][j-nn][k].C[3]);
		xy += -fcx[kn] * g;
		/* grad_z f at x-offset +n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcz[knn] * (grid->point[i+n][j][k+nn].C[3]-grid->point[i+n][j][k-nn].C[3]);
		xz += fcx[kn] * g;
		/* grad_z f at x-offset -n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcz[knn] * (grid->point[i-n][j][k+nn].C[3]-grid->point[i-n][j][k-nn].C[3]);
		xz += -fcx[kn] * g;
		/* grad_z f at y-offset +n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcz[knn] * (grid->point[i][j+n][k+nn].C[3]-grid->point[i][j+n][k-nn].C[3]);
		yz += fcy[kn] * g;
		/* grad_z f at y-offset -n */
		g = 0;
		for(nn=-nBoundary, knn=0 ; knn<nBoundary ; nn++, knn++)
		g += fcz[knn] * (grid->point[i][j-n][k+nn].C[3]-grid->point[i][j-n][k-nn].C[3]);
		yz += -fcy[kn] * g;
	}
	/* symmetric 3x3 tensor, lower-triangle packed:
	 * [xx, xy, yy, xz, yz, zz] -- layout expected by eigen() */
	tensor[0] = xx;
	tensor[1] = xy;
	tensor[2] = yy;
	tensor[3] = xz;
	tensor[4] = yz;
	tensor[5] = zz;
	/* d[1] pre-set to 0 so the function returns 0 if eigen() fails */
	d[1] = 0;
	if(eigen(tensor, 3, d, eigv))
	{
		/* sort the three eigenvalues ascending; d[1] is the middle one */
		if(d[0]>d[1]) swapDouble(&d[0],&d[1]);
		if(d[0]>d[2]) swapDouble(&d[0],&d[2]);
		if(d[1]>d[2]) swapDouble(&d[1],&d[2]);
	}
	return d[1];
}
/*******************************************************************************************/
/*
 * Compute charge-transfer descriptors from a density-difference grid
 * (C[3] holds the density difference at each node):
 *   CP / CN : barycenters of the positive / negative parts (out, length 3)
 *   qp / qn : integrated positive / negative charge (density sum * dv)
 *   H       : mean spread of the two distributions projected on the
 *             charge-transfer axis CP-CN
 * Two passes over the grid (charges+centers, then second moments), each
 * reporting progress per x-slab and honoring user cancellation.
 * Returns FALSE on NULL grid or cancellation (outputs zeroed).
 * Change: removed the unused local `Dx`.
 */
gboolean get_charge_transfer_centers(Grid* grid, gdouble* CN, gdouble* CP, gdouble *qn, gdouble* qp, gdouble* H)
{
	gint i;
	gint j;
	gint k;
	gint c;
	gdouble sp = 0;   /* running sum of positive density values */
	gdouble sn = 0;   /* running sum of negative density values */
	gdouble scale = 1;
	gdouble xx,yy,zz,dv;
	gdouble HP[3];
	gdouble HN[3];
	gdouble norm = 0;
	gdouble DCT[3];   /* charge-transfer vector CP - CN (normalized below) */

	*qp = 0;
	*qn = 0;
	for(c=0;c<3;c++) CP[c] = 0.0;
	for(c=0;c<3;c++) CN[c] = 0.0;
	if(grid==NULL) return FALSE;
	progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
	scale = (gdouble)1.01/grid->N[0]; /* progress increment per x-slab */
	/* uniform-grid volume element from adjacent node spacings */
	xx = grid->point[1][0][0].C[0]-grid->point[0][0][0].C[0];
	yy = grid->point[0][1][0].C[1]-grid->point[0][0][0].C[1];
	zz = grid->point[0][0][1].C[2]-grid->point[0][0][0].C[2];
	dv = fabs(xx*yy*zz);
	/* pass 1: accumulate charges and (unnormalized) barycenters */
	for(i=0;i<grid->N[0];i++)
	{
		for(j=0;j<grid->N[1];j++)
		{
			for(k=0;k<grid->N[2];k++)
			{
				if(grid->point[i][j][k].C[3]>=0)
				{
					sp += grid->point[i][j][k].C[3];
					for(c=0;c<3;c++) CP[c] += grid->point[i][j][k].C[3]*grid->point[i][j][k].C[c];
				}
				else
				{
					sn += grid->point[i][j][k].C[3];
					for(c=0;c<3;c++) CN[c] += grid->point[i][j][k].C[3]*grid->point[i][j][k].C[c];
				}
			}
		}
		if(CancelCalcul)
		{
			progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
			break;
		}
		progress_orb(scale,GABEDIT_PROGORB_UNK,FALSE);
	}
	progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
	if(CancelCalcul)
	{
		for(c=0;c<3;c++) CP[c] = 0.0;
		for(c=0;c<3;c++) CN[c] = 0.0;
		return FALSE;
	}
	/* normalize barycenters by their total (signed) weights */
	if(fabs(sp)>1e-10) for(c=0;c<3;c++) CP[c] /= sp;
	if(fabs(sn)>1e-10) for(c=0;c<3;c++) CN[c] /= sn;
	*qp = sp*dv;
	*qn = sn*dv;
	/* H index: unit vector along the charge-transfer direction */
	for(c=0;c<3;c++) DCT[c] = (CP[c]-CN[c]);
	norm = 0;
	for(c=0;c<3;c++) norm += DCT[c]*DCT[c];
	norm = sqrt(norm);
	if(norm>0) for(c=0;c<3;c++) DCT[c] /=norm;
	progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
	/* pass 2: second moments about each barycenter, projected on DCT */
	for(c=0;c<3;c++) HP[c] = 0;
	for(c=0;c<3;c++) HN[c] = 0;
	for(i=0;i<grid->N[0];i++)
	{
		for(j=0;j<grid->N[1];j++)
		{
			for(k=0;k<grid->N[2];k++)
			{
				if(grid->point[i][j][k].C[3]>=0)
				{
					for(c=0;c<3;c++) HP[c] += grid->point[i][j][k].C[3]*(grid->point[i][j][k].C[c]-CP[c])*(grid->point[i][j][k].C[c]-CP[c])*DCT[c]*DCT[c];
				}
				else
				{
					for(c=0;c<3;c++) HN[c] += grid->point[i][j][k].C[3]*(grid->point[i][j][k].C[c]-CN[c])*(grid->point[i][j][k].C[c]-CN[c])*DCT[c]*DCT[c];
				}
			}
		}
		if(CancelCalcul)
		{
			progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
			break;
		}
		progress_orb(scale,GABEDIT_PROGORB_UNK,FALSE);
	}
	progress_orb(0,GABEDIT_PROGORB_UNK,TRUE);
	if(CancelCalcul)
	{
		for(c=0;c<3;c++) CP[c] = 0.0;
		for(c=0;c<3;c++) CN[c] = 0.0;
		*H = 0;
		*qp = 0;
		*qn = 0;
		return FALSE;
	}
	if(fabs(sp)>1e-10) for(c=0;c<3;c++) HP[c] /= sp;
	if(fabs(sn)>1e-10) for(c=0;c<3;c++) HN[c] /= sn;
	/* H = average of the two RMS spreads (fabs guards the negative weights
	 * of the HN sum) */
	*H = 0;
	norm = 0;
	for(c=0;c<3;c++) norm += HP[c];
	*H += sqrt(fabs(norm));
	norm = 0;
	for(c=0;c<3;c++) norm += HN[c];
	*H += sqrt(fabs(norm));
	*H /= 2;
	return TRUE;
}
|
fox_floats_timer_caching_omp_fileIO_benchmark.c | /* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compile command:
 * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
 * -o fox_floats_timer_caching_omp_fileIO_benchmark
 *
 * Run command:
 * mpirun -n 4 ./fox_floats_timer_caching_omp_fileIO_benchmark
 */
/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 8192
// define whether or not to print matrices on the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define the number of threads used in the computation
#define NUM_THREADS 1
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Process-grid bookkeeping for Fox's algorithm: the p processes are
 * arranged as a q x q Cartesian grid (q = sqrt(p)), with separate
 * communicators for the whole grid, each row, and each column. */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid (q = sqrt(p)) */
int my_row; /* My row number in the grid */
int my_col; /* My column number in the grid */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Local n_bar x n_bar block of the global matrix, stored row-major in a
 * fixed-size array (so a whole block can be shipped as one MPI message). */
#define MAX 2097152 // Maximum number of elements in the array that stores the local matrix (2^21)
typedef struct {
int n_bar; // order of this local block
#define Order(A) ((A)->n_bar) // accessor macro (macro with parameters)
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // row-major element accessor (macro with parameters)
} LOCAL_MATRIX_T;
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD Mode start from here (Processess fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++;
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)
Build_matrix_type(local_A); // Buid local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
} /* main */
/*********************************************************/
/* Build the q x q Cartesian process grid plus its row and column
 * sub-communicators, filling every field of *grid.
 * Assumes the number of processes is a perfect square. */
void Setup_grid(
	GRID_INFO_T* grid /* out */) {
	int world_rank;
	int dims[2];
	int periods[2];
	int coords[2];
	int remain[2];

	/* Global information: process count and this process's world rank. */
	MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
	MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

	/* q = sqrt(p); the caller guarantees p is a perfect square. */
	grid->q = (int) sqrt((double) grid->p);
	dims[0] = dims[1] = grid->q;

	/* Periodic in both dimensions -- the circular shift of B in Fox
	 * needs wrap-around in the second dimension. */
	periods[0] = periods[1] = 1;
	MPI_Cart_create(MPI_COMM_WORLD, 2, dims,
		periods, 1, &(grid->comm));
	MPI_Comm_rank(grid->comm, &(grid->my_rank));
	MPI_Cart_coords(grid->comm, grid->my_rank, 2,
		coords);
	grid->my_row = coords[0];
	grid->my_col = coords[1];

	/* Row communicator: keep the row fixed, vary the column. */
	remain[0] = 0;
	remain[1] = 1;
	MPI_Cart_sub(grid->comm, remain,
		&(grid->row_comm));

	/* Column communicator: keep the column fixed, vary the row. */
	remain[0] = 1;
	remain[1] = 0;
	MPI_Cart_sub(grid->comm, remain,
		&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
/* Fox's algorithm: compute local_C = sum over stages of A-block * B-block.
 * At each stage the diagonal-relative owner broadcasts its A block along
 * the grid row, every process multiplies it into local_C, then the B
 * blocks are circularly shifted up the grid column.
 * Fix: temp_A was allocated but never released (memory leak). */
void Fox(
	int n /* in */,
	GRID_INFO_T* grid /* in */,
	LOCAL_MATRIX_T* local_A /* in */,
	LOCAL_MATRIX_T* local_B /* in */,
	LOCAL_MATRIX_T* local_C /* out */) {
	LOCAL_MATRIX_T* temp_A; /* Storage for the sub-matrix of A
	                         * broadcast during the current stage */
	int stage;
	int bcast_root;
	int n_bar; /* n/sqrt(p) */
	int source;
	int dest;
	MPI_Status status;

	n_bar = n/grid->q;
	Set_to_zero(local_C);
	/* Addresses for the circular shift of B along the column */
	source = (grid->my_row + 1) % grid->q;
	dest = (grid->my_row + grid->q - 1) % grid->q;
	/* Set aside storage for the broadcast block of A */
	temp_A = Local_matrix_allocate(n_bar);
	for (stage = 0; stage < grid->q; stage++) {
		bcast_root = (grid->my_row + stage) % grid->q;
		if (bcast_root == grid->my_col) {
			/* This process owns the stage's block: broadcast local_A
			 * along the row communicator and multiply it in. */
			MPI_Bcast(local_A, 1, local_matrix_mpi_t,
				bcast_root, grid->row_comm);
			Local_matrix_multiply(local_A, local_B,
				local_C);
		} else {
			/* Receive the stage's A block into the temp_A buffer. */
			MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
				bcast_root, grid->row_comm);
			Local_matrix_multiply(temp_A, local_B,
				local_C);
		}
		/* Circular shift of B blocks along the column, in place. */
		MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t,
			dest, 0, source, 0, grid->col_comm, &status);
	} /* for */
	Free_local_matrix(&temp_A); /* fix: previously leaked every call */
} /* Fox */
/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
LOCAL_MATRIX_T* temp;
temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
return temp;
} /* Local_matrix_allocate */
/*********************************************************/
/* Release a local matrix and null the caller's pointer, guarding against
 * use-after-free and double free. free(NULL) is a safe no-op. */
void Free_local_matrix(
	LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
	free(*local_A_ptr);
	*local_A_ptr = NULL;
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix A:
 * foreach global row of the matrix,
 * foreach grid column,
 * read a block of n_bar floats on process 0
 * and send them to the appropriate process.
 * NOTE(review): fopen/fscanf results are unchecked -- a missing or short
 * A.dat silently produces garbage entries; %lf requires FLOAT == double.
 */
void Read_matrix_A(
	char* prompt /* in */,
	LOCAL_MATRIX_T* local_A /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	FILE *fp;
	int mat_row, mat_col;
	int grid_row, grid_col;
	int dest;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;
	/* Process 0 reads the file and distributes row segments. */
	if (grid->my_rank == 0) {
	fp = fopen("A.dat","r");
	temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
	printf("%s\n", prompt);
	fflush(stdout);
	for (mat_row = 0; mat_row < n; mat_row++) {
	/* grid row that owns this global matrix row */
	grid_row = mat_row/Order(local_A);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &dest);
	if (dest == 0) {
	/* local block: read straight into local_A */
	for (mat_col = 0; mat_col < Order(local_A); mat_col++)
	fscanf(fp, "%lf",
	(local_A->entries)+mat_row*Order(local_A)+mat_col);
	} else {
	/* remote block: stage one row segment in temp, then send */
	for(mat_col = 0; mat_col < Order(local_A); mat_col++)
	fscanf(fp,"%lf", temp + mat_col);
	MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
	grid->comm);
	}
	}
	}
	free(temp);
	fclose(fp);
	} else {
	/* Other processes receive their n_bar row segments in file order. */
	for (mat_row = 0; mat_row < Order(local_A); mat_row++)
	MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
	FLOAT_MPI, 0, 0, grid->comm, &status);
	}
} /* Read_matrix */
/*********************************************************/
/* Read and distribute matrix B as its local transpose:
 * foreach global row of the matrix,
 * foreach grid column,
 * read a block of n_bar floats on process 0
 * and send them to the appropriate process.
 * Each local block stores B^T so the multiply kernel can access both
 * operands with unit stride (local C += A(i,k)*B^T(j,k)).
 * NOTE(review): fopen/fscanf results are unchecked, as in Read_matrix_A.
 */
void Read_matrix_B(
	char* prompt /* in */,
	LOCAL_MATRIX_T* local_B /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	FILE *fp;
	int mat_row, mat_col;
	int grid_row, grid_col;
	int dest;
	int coords[2];
	FLOAT *temp;
	MPI_Status status;
	/* Process 0 reads the file and distributes row segments. */
	if (grid->my_rank == 0) {
	fp = fopen("B.dat","r");
	temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
	printf("%s\n", prompt);
	fflush(stdout);
	for (mat_row = 0; mat_row < n; mat_row++) {
	grid_row = mat_row/Order(local_B);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &dest);
	if (dest == 0) {
	/* local block: store transposed (swap row/col indices) */
	for (mat_col = 0; mat_col < Order(local_B); mat_col++)
	fscanf(fp, "%lf",
	(local_B->entries)+mat_col*Order(local_B)+mat_row);
	} else {
	/* remote block: send the row segment as read; the receiver
	 * performs the transpose on its side */
	for(mat_col = 0; mat_col < Order(local_B); mat_col++)
	fscanf(fp, "%lf", temp + mat_col);
	MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
	grid->comm);
	}
	}
	}
	free(temp);
	fclose(fp);
	} else {
	/* Other processes receive row segments and scatter them into
	 * columns, building the transposed local block. */
	temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
	for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
	MPI_Recv(temp, Order(local_B),
	FLOAT_MPI, 0, 0, grid->comm, &status);
	for(mat_row = 0; mat_row < Order(local_B); mat_row++)
	Entry(local_B, mat_row, mat_col) = *(temp + mat_row);
	}
	free(temp);
	}
} /* Read_matrix_B */
/*********************************************************/
/* Receive and print matrix A:
 * foreach global row of the matrix,
 * foreach grid column,
 * non-root processes send their n_bar-float row segments to process 0;
 * process 0 receives each block of n_bar floats and prints the assembled
 * global matrix in row order.
 */
void Print_matrix_A(
	char* title /* in */,
	LOCAL_MATRIX_T* local_A /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	int mat_row, mat_col;
	int grid_row, grid_col;
	int source;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;
	if (grid->my_rank == 0) {
	temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
	printf("%s\n", title);
	for (mat_row = 0; mat_row < n; mat_row++) {
	/* grid row that owns this global matrix row */
	grid_row = mat_row/Order(local_A);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &source);
	if (source == 0) {
	/* own block: print directly */
	for(mat_col = 0; mat_col < Order(local_A); mat_col++)
	printf("%20.15E ", Entry(local_A, mat_row, mat_col));
	} else {
	/* remote block: receive a row segment, then print it */
	MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
	grid->comm, &status);
	for(mat_col = 0; mat_col < Order(local_A); mat_col++)
	printf("%20.15E ", temp[mat_col]);
	}
	}
	printf("\n");
	}
	free(temp);
	} else {
	/* send local rows to process 0, one row per message */
	for (mat_row = 0; mat_row < Order(local_A); mat_row++)
	MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
	FLOAT_MPI, 0, 0, grid->comm);
	}
} /* Print_matrix_A */
/*********************************************************/
/* Receive and print matrix B (whose local blocks store B^T):
 * foreach global row of the matrix,
 * foreach grid column,
 * non-root processes un-transpose a row segment into a buffer and send it
 * to process 0; process 0 receives each block of n_bar floats (or reads
 * its own transposed block directly) and prints the global B row-major.
 */
void Print_matrix_B(
	char* title /* in */,
	LOCAL_MATRIX_T* local_B /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	int mat_row, mat_col;
	int grid_row, grid_col;
	int source;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;
	if (grid->my_rank == 0) {
	temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
	printf("%s\n", title);
	for (mat_row = 0; mat_row < n; mat_row++) {
	grid_row = mat_row/Order(local_B);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &source);
	if (source == 0) {
	/* own block is stored transposed: swap indices on access */
	for(mat_col = 0; mat_col < Order(local_B); mat_col++)
	printf("%20.15E ", Entry(local_B, mat_col, mat_row));
	} else {
	MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
	grid->comm, &status);
	for(mat_col = 0; mat_col < Order(local_B); mat_col++)
	printf("%20.15E ", temp[mat_col]);
	}
	}
	printf("\n");
	}
	free(temp);
	} else {
	/* gather one original-B row (a column of the transposed local
	 * block) into temp, then send it to process 0 */
	temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
	for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
	for(mat_row = 0; mat_row < Order(local_B); mat_row++)
	*(temp+mat_row) = Entry(local_B, mat_row, mat_col);
	MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
	}
	free(temp);
	}
} /* Print_matrix_B */
/*********************************************************/
/* Receive and print matrix C:
 * foreach global row of the matrix,
 * foreach grid column,
 * non-root processes send their n_bar-float row segments to process 0;
 * process 0 receives each block of n_bar floats and prints the assembled
 * product matrix in row order (same scheme as Print_matrix_A).
 */
void Print_matrix_C(
	char* title /* in */,
	LOCAL_MATRIX_T* local_C /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	int mat_row, mat_col;
	int grid_row, grid_col;
	int source;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;
	if (grid->my_rank == 0) {
	temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
	printf("%s\n", title);
	for (mat_row = 0; mat_row < n; mat_row++) {
	grid_row = mat_row/Order(local_C);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &source);
	if (source == 0) {
	for(mat_col = 0; mat_col < Order(local_C); mat_col++)
	printf("%20.15E ", Entry(local_C, mat_row, mat_col));
	} else {
	MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
	grid->comm, &status);
	for(mat_col = 0; mat_col < Order(local_C); mat_col++)
	printf("%20.15E ", temp[mat_col]);
	}
	}
	printf("\n");
	}
	free(temp);
	} else {
	/* send local rows to process 0, one row per message */
	for (mat_row = 0; mat_row < Order(local_C); mat_row++)
	MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
	FLOAT_MPI, 0, 0, grid->comm);
	}
} /* Print_matrix_C */
/*********************************************************/
/* Receive matrix C and write it to C.dat:
 * foreach global row of the matrix,
 * foreach grid column,
 * non-root processes send their n_bar-float row segments to process 0;
 * process 0 receives each block of n_bar floats and writes the assembled
 * product matrix in row order.
 * NOTE(review): fopen result is unchecked, and mode "w+" could simply be
 * "w" -- the file is only written here.
 */
void Write_matrix_C(
	char* title /* in */,
	LOCAL_MATRIX_T* local_C /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	FILE *fp;
	int mat_row, mat_col;
	int grid_row, grid_col;
	int source;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;
	if (grid->my_rank == 0) {
	fp = fopen("C.dat", "w+");
	temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
	printf("%s\n", title);
	for (mat_row = 0; mat_row < n; mat_row++) {
	grid_row = mat_row/Order(local_C);
	coords[0] = grid_row;
	for (grid_col = 0; grid_col < grid->q; grid_col++) {
	coords[1] = grid_col;
	MPI_Cart_rank(grid->comm, coords, &source);
	if (source == 0) {
	/* own block: write directly */
	for(mat_col = 0; mat_col < Order(local_C); mat_col++)
	fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
	} else {
	/* remote block: receive a row segment, then write it */
	MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
	grid->comm, &status);
	for(mat_col = 0; mat_col < Order(local_C); mat_col++)
	fprintf(fp, "%20.15E ", temp[mat_col]);
	}
	}
	fprintf(fp,"\n");
	}
	free(temp);
	fclose(fp);
	} else {
	/* send local rows to process 0, one row per message */
	for (mat_row = 0; mat_row < Order(local_C); mat_row++)
	MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
	FLOAT_MPI, 0, 0, grid->comm);
	}
} /* Write_matrix_C */
/*********************************************************/
/*
 * Zero every entry of a local sub-matrix (used to initialize the
 * accumulator block C before the Fox stages run).
 */
void Set_to_zero(
	LOCAL_MATRIX_T* local_A /* out */) {
	int row, col;
	int order = Order(local_A);

	for (row = 0; row < order; row++)
		for (col = 0; col < order; col++)
			Entry(local_A, row, col) = 0.0E0;
} /* Set_to_zero */
/*********************************************************/
/* Build the MPI derived datatype `local_matrix_mpi_t` that describes a
 * whole LOCAL_MATRIX_T (the int n_bar followed by the n_bar*n_bar entries),
 * so complete local blocks can be shipped in a single message.
 * NOTE(review): MPI_Address and MPI_Type_struct are deprecated MPI-1 calls
 * (removed in MPI-3); the modern equivalents are MPI_Get_address and
 * MPI_Type_create_struct -- confirm against the target MPI implementation.
 */
void Build_matrix_type(
	LOCAL_MATRIX_T* local_A /* in */) {
	MPI_Datatype temp_mpi_t;
	int block_lengths[2];
	MPI_Aint displacements[2];
	MPI_Datatype typelist[2];
	MPI_Aint start_address;
	MPI_Aint address;
	/* contiguous type covering all n_bar x n_bar entries */
	MPI_Type_contiguous(Order(local_A)*Order(local_A),
	FLOAT_MPI, &temp_mpi_t);
	block_lengths[0] = block_lengths[1] = 1;
	typelist[0] = MPI_INT;
	typelist[1] = temp_mpi_t;
	/* byte displacements of n_bar and entries relative to struct start */
	MPI_Address(local_A, &start_address);
	MPI_Address(&(local_A->n_bar), &address);
	displacements[0] = address - start_address;
	MPI_Address(local_A->entries, &address);
	displacements[1] = address - start_address;
	/* struct type = { int n_bar; FLOAT entries[n_bar*n_bar]; } */
	MPI_Type_struct(2, block_lengths, displacements,
	typelist, &local_matrix_mpi_t);
	/* commit so the type can be used in communication */
	MPI_Type_commit(&local_matrix_mpi_t);
} /* Build_matrix_type */
/*********************************************************/
/* Local block multiplication with OpenMP acceleration:
 * local_C += local_A * B, where local_B holds B transposed (B^T), so
 * both operands are read with unit stride: C(i,j) += A(i,k) * B^T(j,k).
 * The outer row loop is split across NUM_THREADS threads. */
void Local_matrix_multiply(
	LOCAL_MATRIX_T* local_A /* in */,
	LOCAL_MATRIX_T* local_B /* in */,
	LOCAL_MATRIX_T* local_C /* out */) {
	int row, col, idx;
#pragma omp parallel for private(row, col, idx) shared(local_A, local_B, local_C) num_threads(NUM_THREADS)
	for (row = 0; row < Order(local_A); row++) {
		for (col = 0; col < Order(local_A); col++) {
			/* accumulate into a scalar; same addition order as the
			 * original read-modify-write form */
			FLOAT sum = Entry(local_C, row, col);
			for (idx = 0; idx < Order(local_B); idx++)
				sum = sum + Entry(local_A, row, idx)*Entry(local_B, col, idx);
			Entry(local_C, row, col) = sum;
		}
	}
} /* Local_matrix_multiply */
/*********************************************************/
/* Receive and print the per-process blocks of A:
 * process 0 prints its own block, then receives every other process's
 * block (into the global scratch temp_mat) in rank order and prints it;
 * every other process just sends its block to process 0.
 */
void Print_local_matrices_A(
	char* title /* in */,
	LOCAL_MATRIX_T* local_A /* in */,
	GRID_INFO_T* grid /* in */) {
	int coords[2];
	int i, j;
	int source;
	MPI_Status status;
	/* all printing is funneled through process 0 of the grid */
	if (grid->my_rank == 0) {
	printf("%s\n", title);
	printf("Process %d > grid_row = %d, grid_col = %d\n",
	grid->my_rank, grid->my_row, grid->my_col);
	for (i = 0; i < Order(local_A); i++) {
	for (j = 0; j < Order(local_A); j++)
	printf("%20.15E ", Entry(local_A,i,j));
	printf("\n");
	}
	for (source = 1; source < grid->p; source++) {
	/* the received struct carries its own n_bar via local_matrix_mpi_t */
	MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
	grid->comm, &status);
	MPI_Cart_coords(grid->comm, source, 2, coords);
	printf("Process %d > grid_row = %d, grid_col = %d\n",
	source, coords[0], coords[1]);
	for (i = 0; i < Order(temp_mat); i++) {
	for (j = 0; j < Order(temp_mat); j++)
	printf("%20.15E ", Entry(temp_mat,i,j));
	printf("\n");
	}
	}
	fflush(stdout);
	} else {
	MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
	}
} /* Print_local_matrices_A */
/*********************************************************/
/* Receive and print the per-process blocks of B:
 * same gather scheme as Print_local_matrices_A, but local B blocks are
 * stored transposed, so indices are swapped on every access to print the
 * block in its original (untransposed) orientation.
 */
void Print_local_matrices_B(
	char* title /* in */,
	LOCAL_MATRIX_T* local_B /* in */,
	GRID_INFO_T* grid /* in */) {
	int coords[2];
	int i, j;
	int source;
	MPI_Status status;
	/* all printing is funneled through process 0 of the grid */
	if (grid->my_rank == 0) {
	printf("%s\n", title);
	printf("Process %d > grid_row = %d, grid_col = %d\n",
	grid->my_rank, grid->my_row, grid->my_col);
	for (i = 0; i < Order(local_B); i++) {
	for (j = 0; j < Order(local_B); j++)
	printf("%20.15E ", Entry(local_B,j,i)); /* transposed storage */
	printf("\n");
	}
	for (source = 1; source < grid->p; source++) {
	MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
	grid->comm, &status);
	MPI_Cart_coords(grid->comm, source, 2, coords);
	printf("Process %d > grid_row = %d, grid_col = %d\n",
	source, coords[0], coords[1]);
	for (i = 0; i < Order(temp_mat); i++) {
	for (j = 0; j < Order(temp_mat); j++)
	printf("%20.15E ", Entry(temp_mat,j,i)); /* transposed storage */
	printf("\n");
	}
	}
	fflush(stdout);
	} else {
	MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
	}
} /* Print_local_matrices_B */
/*********************************************************/
/* Receive and print local matrix C:
 * Process 0 prints its own local matrix local_C.
 * Every other process sends its local matrix local_C to process 0,
 * and process 0 receives and prints each one in turn.
 */
/* Gather every rank's local_C block on process 0 and print them.
 * Rank 0 prints its own block first, then receives one message per
 * remaining rank (into the global temp_mat) and prints each block,
 * labelled with its cartesian coordinates in the process mesh.
 * Non-root ranks simply send their block to rank 0. */
void Print_local_matrices_C(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_C  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    MPI_Status status;
    int        coords[2];
    int        row, col;
    int        src;

    if (grid->my_rank != 0) {
        /* Non-root: ship our block to rank 0 and return. */
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
        return;
    }

    printf("%s\n", title);
    printf("Process %d > grid_row = %d, grid_col = %d\n",
           grid->my_rank, grid->my_row, grid->my_col);
    for (row = 0; row < Order(local_C); row++) {
        for (col = 0; col < Order(local_C); col++)
            printf("%20.15E ", Entry(local_C, row, col));
        printf("\n");
    }

    /* Receive and print every other rank's block, in rank order. */
    for (src = 1; src < grid->p; src++) {
        MPI_Recv(temp_mat, 1, local_matrix_mpi_t, src, 0,
                 grid->comm, &status);
        MPI_Cart_coords(grid->comm, src, 2, coords);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
               src, coords[0], coords[1]);
        for (row = 0; row < Order(temp_mat); row++) {
            for (col = 0; col < Order(temp_mat); col++)
                printf("%20.15E ", Entry(temp_mat, row, col));
            printf("\n");
        }
    }
    fflush(stdout);
} /* Print_local_matrices_C */
/*********************************************************/
/* Receive and write local matrix A:
 * Process 0 writes its own local matrix local_A to a file.
 * Every other process sends its local matrix local_A to process 0,
 * and process 0 receives and writes each one in turn.
 */
/* Gather all local_A blocks on process 0 and write them to "local_A.dat".
 * Rank 0 writes its own block, then receives each other rank's block
 * (into the global temp_mat) and writes it; non-root ranks just send.
 * Fixes vs. original: the fopen() result is checked (receives are still
 * drained on failure so the matching MPI_Send calls cannot deadlock),
 * and the file stream itself is closed/flushed instead of flushing
 * stdout while the data went to fp. */
void Write_local_matrices_A(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_A  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;

    if (grid->my_rank != 0) {
        MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
        return;
    }

    fp = fopen("local_A.dat", "w+");
    if (fp == NULL)
        fprintf(stderr, "Write_local_matrices_A: cannot open local_A.dat; "
                "output will be discarded\n");
    printf("%s\n", title);
    if (fp != NULL) {
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                fprintf(fp, "%20.15E ", Entry(local_A,i,j));
            fprintf(fp, "\n");
        }
    }
    /* Always receive from every rank, even if the file could not be
     * opened, so the senders' MPI_Send calls complete. */
    for (source = 1; source < grid->p; source++) {
        MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                 grid->comm, &status);
        MPI_Cart_coords(grid->comm, source, 2, coords);
        if (fp != NULL) {
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                    source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
                fprintf(fp, "\n");
            }
        }
    }
    if (fp != NULL)
        fclose(fp); /* fclose flushes fp; original flushed stdout instead */
} /* Write_local_matrices_A */
/*********************************************************/
/* Receive and write the transpose of local matrix B:
 * Process 0 writes its own local matrix local_B to a file (indices
 * swapped, since B is stored column-major). Every other process sends
 * its local_B to process 0, and process 0 receives and writes each one.
 */
/* Gather all local_B blocks on process 0 and write them to "local_B.dat".
 * local_B is stored column-major (it holds B^T), so the row/column
 * indices are swapped when reading entries for output.
 * Fixes vs. original: the fopen() result is checked (receives are still
 * drained on failure so the matching MPI_Send calls cannot deadlock),
 * and the file stream itself is closed/flushed instead of flushing
 * stdout while the data went to fp. */
void Write_local_matrices_B(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_B  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;

    if (grid->my_rank != 0) {
        MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
        return;
    }

    fp = fopen("local_B.dat", "w+");
    if (fp == NULL)
        fprintf(stderr, "Write_local_matrices_B: cannot open local_B.dat; "
                "output will be discarded\n");
    printf("%s\n", title);
    if (fp != NULL) {
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_B); i++) {
            /* Swapped indices: column-major storage of B. */
            for (j = 0; j < Order(local_B); j++)
                fprintf(fp, "%20.15E ", Entry(local_B,j,i));
            fprintf(fp, "\n");
        }
    }
    /* Always receive from every rank, even if the file could not be
     * opened, so the senders' MPI_Send calls complete. */
    for (source = 1; source < grid->p; source++) {
        MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                 grid->comm, &status);
        MPI_Cart_coords(grid->comm, source, 2, coords);
        if (fp != NULL) {
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                    source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                /* Swapped indices: column-major storage of B. */
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,j,i));
                fprintf(fp, "\n");
            }
        }
    }
    if (fp != NULL)
        fclose(fp); /* fclose flushes fp; original flushed stdout instead */
} /* Write_local_matrices_B */
/*********************************************************/
/* Receive and write local matrix C:
 * Process 0 writes its own local matrix local_C to a file.
 * Every other process sends its local matrix local_C to process 0,
 * and process 0 receives and writes each one in turn.
 */
/* Gather all local_C blocks on process 0 and write them to "local_C.dat".
 * Rank 0 writes its own block, then receives each other rank's block
 * (into the global temp_mat) and writes it; non-root ranks just send.
 * Fixes vs. original: the fopen() result is checked (receives are still
 * drained on failure so the matching MPI_Send calls cannot deadlock),
 * and the file stream itself is closed/flushed instead of flushing
 * stdout while the data went to fp. */
void Write_local_matrices_C(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_C  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;

    if (grid->my_rank != 0) {
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
        return;
    }

    fp = fopen("local_C.dat", "w+");
    if (fp == NULL)
        fprintf(stderr, "Write_local_matrices_C: cannot open local_C.dat; "
                "output will be discarded\n");
    printf("%s\n", title);
    if (fp != NULL) {
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_C); i++) {
            for (j = 0; j < Order(local_C); j++)
                fprintf(fp, "%20.15E ", Entry(local_C,i,j));
            fprintf(fp, "\n");
        }
    }
    /* Always receive from every rank, even if the file could not be
     * opened, so the senders' MPI_Send calls complete. */
    for (source = 1; source < grid->p; source++) {
        MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                 grid->comm, &status);
        MPI_Cart_coords(grid->comm, source, 2, coords);
        if (fp != NULL) {
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                    source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
                fprintf(fp, "\n");
            }
        }
    }
    if (fp != NULL)
        fclose(fp); /* fclose flushes fp; original flushed stdout instead */
} /* Write_local_matrices_C */
|
matmul-parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define SEED 123
/* Release a size x size matrix allocated as an array of row pointers:
 * free each row, then the pointer array itself. */
void free_matrix(int **m, int size) {
    int row = 0;
    while (row < size) {
        free(m[row]);
        row++;
    }
    free(m);
}
/* Multiply two size x size matrices and return a freshly allocated
 * product (caller owns and must free the result). */
int **mul(int **a, int **b, int size) {
    int **prod = malloc(size * sizeof(int *));
    for (int r = 0; r < size; r++) {
        prod[r] = calloc(size, sizeof(int));
        for (int k = 0; k < size; k++) {
            const int ark = a[r][k];
            /* Accumulate a[r][k] * b[k][*] into row r of the product. */
            for (int c = 0; c < size; c++)
                prod[r][c] += ark * b[k][c];
        }
    }
    return prod;
}
// Parallelise this function:
/* Reduce data[0..n-1] to a single matrix product by pairwise (tree)
 * combination: round i merges data[j] <- data[j] * data[j+i] for every
 * pair spaced i apart.  The pairs within a round are independent, so
 * each merge becomes its own OpenMP task; taskwait separates rounds.
 * Fixes vs. original: one task is spawned per pair instead of one task
 * per round (the original's task + immediate taskwait serialised all
 * the work), and the two matrices consumed by each merge are freed
 * (the original leaked every intermediate result). */
int **array_mul(int ***data, int n, int size) {
#pragma omp parallel
    {
#pragma omp single
        {
            for (int i = 1; i <= n; i *= 2) {
                for (int j = 0; j + i < n; j += i * 2) {
#pragma omp task firstprivate(i, j)
                    {
                        int **left = data[j];
                        data[j] = mul(left, data[j + i], size);
                        /* Both inputs are dead after the merge. */
                        free_matrix(left, size);
                        free_matrix(data[j + i], size);
                        data[j + i] = NULL;
                    }
                }
                /* All merges of this round must finish before the next
                 * round reads their results. */
#pragma omp taskwait
            }
        }
    }
    return data[0];
}
/* Allocate a size x size matrix whose entries are drawn uniformly from
 * {-1, +1} using rand() (one call per entry). */
int **rnd_matrix(int size) {
    int **m = malloc(size * sizeof(int *));
    for (int r = 0; r < size; r++) {
        m[r] = malloc(size * sizeof(int));
        for (int c = 0; c < size; c++) {
            /* rand() % 2 yields 0 or 1; map {0,1} -> {-1,+1}. */
            m[r][c] = (rand() % 2 == 0) ? -1 : 1;
        }
    }
    return m;
}
/* Print a size x size matrix to stdout, one row per line, each entry
 * followed by a single space. */
void print_matrix(int **m, int size) {
    int r, c;
    for (r = 0; r < size; r++) {
        for (c = 0; c < size; c++)
            printf("%d ", m[r][c]);
        putchar('\n');
    }
}
/* Read "n size" from the file named on the command line, build n random
 * size x size matrices, reduce them with array_mul, print the product
 * and report the elapsed wall-clock time on stderr.
 * Fixes vs. original: the fscanf result is validated (n and size must
 * parse and be positive), the input stream is closed, and the data
 * allocation is checked before use. */
int main(int argc, char **argv) {
    int n, size;
    double t;
    FILE *input;

    if (argc < 2) {
        fprintf(stderr, "Error: missing path to input file!\n");
        return EXIT_FAILURE;
    }
    if ((input = fopen(argv[1], "r")) == NULL) {
        fprintf(stderr, "Error: could not open input file!\n");
        return EXIT_FAILURE;
    }
    if (fscanf(input, "%d %d", &n, &size) != 2 || n <= 0 || size <= 0) {
        fprintf(stderr, "Error: malformed input file!\n");
        fclose(input);
        return EXIT_FAILURE;
    }
    fclose(input);

    srand(SEED);
    // Do not change this line
    omp_set_num_threads(4);

    int ***data = malloc(n * sizeof(int **));
    if (data == NULL) {
        fprintf(stderr, "Error: out of memory!\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < n; i++)
        data[i] = rnd_matrix(size);

    t = omp_get_wtime();
    int **ret = array_mul(data, n, size);
    t = omp_get_wtime() - t;

    print_matrix(ret, size);
    fprintf(stderr, "%lf\n", t);

    free_matrix(ret, size);
    free(data);
    return 0;
}
|
grid_utils.h | #pragma once
#include <mtao/types.hpp>
#include <array>
#include <tuple>
#include <iterator>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace mtao::geometry::grid::utils {
namespace internal {
// Bitmask with the low D bits set, e.g. full_mask(3) == 0b111.
constexpr int full_mask(int D) {
    return (1 << D) - 1;
}
// Compile-time recursive looper over an M-dimensional index box.
// N is the dimension handled at this recursion depth; Mask selects which
// dimensions participate (a dimension d is skipped when bit d of Mask is
// clear).  When Mask == full_mask(M+1) (the "all bits on" sentinel), the
// compile-time mask is inert and the runtime `mask` argument is consulted
// instead.  Reverse flips which physical dimension each depth iterates.
// NOTE(review): the Parallel branches below are only reachable when a
// caller instantiates Parallel=true — see the aliases further down.
template <int N, int M, int Mask, typename coord_type, typename Func, bool Reverse = false, bool Parallel = false>
struct masked_multi_looper {
    // True when dimension idx is excluded by the compile-time Mask.
    constexpr static bool Masked(int idx) {
        return !bool(Mask & (1 << idx));
    }
    // True when dimension idx is excluded by the runtime mask.
    static bool masked(int mask, int idx) {
        return !bool(mask & (1 << idx));
    }
    // Physical dimension iterated at this depth (reversed if requested).
    constexpr static int MyN = Reverse?M-N-1:N;
    // Loop idx over [0, bounds) in every unmasked dimension, calling f at
    // the innermost level (via the N==M specialization).
    static void run(const coord_type& bounds, coord_type& idx, const Func& f, int mask = 0) {
        if constexpr(Mask == full_mask(M+1)) {
            // Runtime-mask mode: skip this dimension if the caller's mask says so.
            if(masked(mask,N)) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f, mask);
                return;
            }
        }
        if constexpr(Masked(N)) {
            // Compile-time-masked dimension: recurse without looping.
            masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f, mask);
        } else if constexpr(false && M >= N+2 && !Masked(N+1)) {
            // NOTE(review): deliberately disabled (leading `false &&`)
            // two-dimensions-at-a-time variant; kept for reference.
            constexpr static int NN = N+1;
            constexpr static int MyNN = Reverse?M-NN-1:NN;
            if constexpr(Parallel) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
                for(auto& i = idx[MyN] = 0; i < bounds[MyN]; ++i) {
                    for(auto& j = idx[MyNN] = 0; j < bounds[MyNN]; ++j) {
                        masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask);
                    }
                }
            } else {
                for(auto& i = idx[MyN] = 0; i < bounds[MyN]; ++i) {
                    for(auto& j = idx[MyNN] = 0; j < bounds[MyNN]; ++j) {
                        masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask);
                    }
                }
            }
            idx[MyNN] = 0;
        } else if constexpr(Parallel) {
            // NOTE(review): the loop below binds a reference into idx and is
            // not in OpenMP canonical form — confirm it compiles with
            // -fopenmp before instantiating Parallel=true.
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for(auto&& i = idx[MyN] = 0; i < bounds[MyN]; ++i) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask);
            }
        } else {
            // Serial loop over this dimension; inner dimensions recurse.
            for(auto&& i = idx[MyN] = 0; i < bounds[MyN]; ++i) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask);
            }
        }
        // Reset this dimension so the caller's loop sees a clean index.
        idx[MyN] = 0;
    }
    // Same traversal, but over the half-open box [begin, end) instead of
    // [0, bounds); the index is reset to begin[MyN] on the way out.
    static void run(const coord_type& begin, const coord_type& end, coord_type& idx, const Func& f, int mask = 0) {
        if constexpr(Mask == full_mask(M+1)) {
            if(masked(mask,N)) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask);
                return;
            }
        }
        if constexpr(Masked(N)) {
            masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f,mask);
        } else if constexpr(false && M >= N+2 && !Masked(N+1)) {
            // NOTE(review): disabled two-at-a-time variant, as above.
            constexpr static int NN = N+1;
            constexpr static int MyNN = Reverse?M-NN-1:NN;
            if constexpr(Parallel) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
                for(auto& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) {
                    for(auto& j = idx[MyNN] = begin[MyNN]; j < end[MyNN]; ++j) {
                        masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask);
                    }
                }
            } else {
                for(auto& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) {
                    for(auto& j = idx[MyNN] = begin[MyNN]; j < end[MyNN]; ++j) {
                        masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask);
                    }
                }
            }
            idx[MyNN] = begin[MyNN];
        } else if constexpr(Parallel) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for(auto&& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask);
            }
        } else {
            for(auto&& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) {
                masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask);
            }
        }
        idx[MyN] = begin[MyN];
    }
};
// Recursion base case (N == M): every dimension has been fixed, so
// invoke the callback once with the fully populated index.  The bounds
// (or begin/end) and mask arguments are unused here.
template <int N, int Mask, typename coord_type, typename Func, bool Reverse, bool Parallel>
struct masked_multi_looper<N,N, Mask,coord_type,Func,Reverse, Parallel> {
    static void run(const coord_type& /*bounds*/, coord_type& index, const Func& callback, int /*mask*/ = 0) {
        callback(index);
    }
    static void run(const coord_type& /*begin*/, const coord_type& /*end*/, coord_type& index, const Func& callback, int /*mask*/ = 0) {
        callback(index);
    }
};
// Convenience alias: iterate every dimension (mask = all ones), serially.
template <int N, int M, typename coord_type, typename Func, bool Reverse = false>
using multi_looper = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, false>;
#ifdef _OPENMP
template <int N, int M, typename coord_type, typename Func, bool Reverse = false>
using multi_looper_parallel = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, true>;
#else
// NOTE(review): this branch is byte-identical to the _OPENMP one above
// (Parallel=true even without OpenMP).  Presumably the intent was to pass
// false here, or the #ifdef is vestigial — confirm before changing.
template <int N, int M, typename coord_type, typename Func, bool Reverse = false>
using multi_looper_parallel = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, true>;
#endif
}
// Mask is an integer M such that every dimension d s.t (1 << d) & M == 0, the looper skips that dimension
// Loop idx over [0, index) in every dimension enabled by the compile-time Mask.
template <int Mask, typename coord_type, typename Func>
void masked_multi_loop(const coord_type& index, const Func& f) {
    coord_type idx = {};
    internal::masked_multi_looper<0,std::tuple_size<coord_type>::value,Mask,coord_type,Func,false>::run(index,idx,f);
}
// Runtime-mask variant: the full_mask(size+1) sentinel tells the looper to
// consult the runtime `mask` argument instead of the template Mask.
template <typename coord_type, typename Func>
void masked_multi_loop(const coord_type& index, const Func& f, int mask) {
    coord_type idx = {};
    constexpr static int size = std::tuple_size<coord_type>::value;
    internal::masked_multi_looper<0,size,internal::full_mask(size+1),coord_type,Func,false>::run(index,idx,f, mask);
}
// Loop idx over [0, index) in every dimension, calling f(idx) at each point.
template <typename coord_type, typename Func>
void multi_loop(const coord_type& index, const Func& f) {
    coord_type idx = {};
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(index,idx,f);
}
// NOTE(review): despite the name, this delegates to the serial
// internal::multi_looper, not multi_looper_parallel, so it currently runs
// serially.  Confirm whether that is intentional before "fixing": the
// parallel path's pragmas sit on non-canonical for-loops.
template <typename coord_type, typename Func>
void multi_loop_parallel(const coord_type& index, const Func& f) {
    coord_type idx = {};
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(index,idx,f);
}
template <typename coord_type, typename Func>
void right_multi_loop(const coord_type& index, const Func& f) {//Same above but does dimensions in reverse
    coord_type idx = {};
    std::fill(idx.begin(),idx.end(),0);
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,true>::run(index,idx,f);
}
// Half-open box variant: loop idx over [begin, end) in every dimension.
template <typename coord_type, typename Func>
void multi_loop(const coord_type& begin, const coord_type& end, const Func& f) {
    coord_type idx(begin);
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(begin,end,idx,f);
}
// NOTE(review): serial for the same reason as the one-argument overload above.
template <typename coord_type, typename Func>
void multi_loop_parallel(const coord_type& begin, const coord_type& end, const Func& f) {
    coord_type idx(begin);
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(begin,end,idx,f);
}
template <typename coord_type, typename Func>
void right_multi_loop(const coord_type& begin, const coord_type& end, const Func& f) {//Same above but does dimensions in reverse
    coord_type idx(begin);
    internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,true>::run(begin,end,idx,f);
}
}
namespace mtao {
namespace interp_internal {
// Linear interpolation: alpha = 0 -> lo, alpha = 1 -> hi.
template <typename T, typename U>
T lerp(const T& lo, const T& hi, U alpha) {
    const U complement = 1 - alpha;
    return complement * lo + alpha * hi;
}
// Bilinear interpolation of four corner values:
// 0,0 => a, 0,1 => b, 1,0 => c, 1,1 => d (alpha blends a/b and c/d,
// beta blends the two intermediate results) — original lerp-of-lerps
// inlined, same arithmetic.
template <typename T, typename U>
T bilerp(const T& a, const T& b, const T& c, const T& d, U alpha, U beta) {
    const T ab = (1 - alpha) * a + alpha * b;
    const T cd = (1 - alpha) * c + alpha * d;
    return (1 - beta) * ab + beta * cd;
}
// Trilinear interpolation of the eight cube-corner values a..h:
// gamma blends the bottom face (a,b,c,d) with the top face (e,f,g,h),
// each of which is bilinearly interpolated by alpha and beta.
template <typename T, typename U>
T trilerp(const T& a, const T& b, const T& c, const T& d, const T& e, const T& f, const T& g, const T& h, U alpha, U beta, U gamma) {
    const T bottom = bilerp(a,b,c,d,alpha,beta);// 0,0,0 => a, 0,1,0 => b, 1,0,0 => c, 1,1,0 => d
    const T top    = bilerp(e,f,g,h,alpha,beta);// 0,0,1 => e, 0,1,1 => f, 1,0,1 => g, 1,1,1 => h
    return lerp(bottom, top, gamma);
}
// Bilinear sample of grid g inside the cell whose lower corner is (i,j).
template <typename GridType, typename U>
typename GridType::Scalar bilerp(const GridType& g, int i, int j, U alpha, U beta) {
    const auto v00 = g(i,  j  );
    const auto v10 = g(i+1,j  );
    const auto v01 = g(i,  j+1);
    const auto v11 = g(i+1,j+1);
    return bilerp(v00, v10, v01, v11, alpha, beta);
}
// Trilinear sample of grid g inside the cell whose lower corner is (i,j,k).
template <typename GridType, typename U>
typename GridType::Scalar trilerp(const GridType& g, int i, int j, int k, U alpha, U beta, U gamma) {
    const auto v000 = g(i,  j,  k  );
    const auto v100 = g(i+1,j,  k  );
    const auto v010 = g(i,  j+1,k  );
    const auto v110 = g(i+1,j+1,k  );
    const auto v001 = g(i,  j,  k+1);
    const auto v101 = g(i+1,j,  k+1);
    const auto v011 = g(i,  j+1,k+1);
    const auto v111 = g(i+1,j+1,k+1);
    return trilerp(v000, v100, v010, v110,
                   v001, v101, v011, v111,
                   alpha, beta, gamma);
}
}
// Decompose coordinate a into a cell index *i in [0, ni-1] and a
// fractional offset *di.  a is first clamped to [0, ni]; coordinates at
// or beyond the upper edge land in the last cell with an offset just
// below 1, so *di always stays in [0, 1).
template <typename T>
void barycentric(T a, int ni, int* i, T* di ) {
    // Value just below 1, used for the clamped upper-edge case.
    constexpr static T ome = 1 - std::numeric_limits<T>::epsilon();
    a = std::min<T>(std::max<T>(a,0),ni);
    const T whole = std::floor(a);
    *i  = int(whole);
    *di = a - whole;
    if(*i < 0) {
        *i  = 0;
        *di = 0;
    } else if(*i >= ni) {
        *i  = ni-1;
        *di = ome;
    }
}
// Vectorized barycentric decomposition: applies the scalar overload per
// dimension, with shape[d]-1 cells along dimension d.
template <typename VecType, typename ShapeType,typename DiffType, int D = std::tuple_size<ShapeType>::value>
void barycentric(const VecType& v, const ShapeType& shape, ShapeType& i, DiffType& di) {
    static_assert(std::tuple_size<DiffType>::value == D,"");
    for(size_t dim = 0; dim < D; ++dim) {
        barycentric(v(dim), shape[dim]-1, &i[dim], &di[dim]);
    }
}
// Dimension-generic grid interpolation: sample grid g in the cell rooted
// at integer index i with fractional offsets di.  The general declaration
// is resolved by the fixed-size overloads below.
template <typename T, typename GridType>
typename GridType::Scalar lerp(const GridType& g, const std::array<int,GridType::D>& i, const std::array<T,GridType::D>& di);
// 2-D overload: bilinear interpolation.
template <typename T, typename GridType>
typename GridType::Scalar lerp(const GridType& g, const std::array<int,2>& i, const std::array<T,2>& di) {
    static_assert(GridType::D == 2,"");
    return mtao::interp_internal::bilerp(g,i[0],i[1],di[0],di[1]);
}
// 3-D overload: trilinear interpolation.
template <typename T, typename GridType>
typename GridType::Scalar lerp(const GridType& g, const std::array<int,3>& i, const std::array<T,3>& di) {
    static_assert(GridType::D == 3,"");
    return mtao::interp_internal::trilerp(g,i[0],i[1],i[2],di[0],di[1],di[2]);
}
// Continuous-coordinate entry point: split v into integer cell index and
// fractional offsets via barycentric(), then interpolate.
template <typename VecType, typename GridType>
typename GridType::Scalar lerp(const GridType& g, const VecType& v) {
    constexpr int Dim = GridType::D;//mtao::Grid assumption
    //int i,j;
    //float di,dj;
    std::array<int,Dim> i;
    std::array<typename VecType::Scalar,Dim> di;
    barycentric(v,g.shape(),i,di);
    return lerp(g,i,di);
}
// Legacy name: bilerp on a grid+point is just lerp.
template <typename VecType, typename GridType>
typename GridType::Scalar bilerp(const GridType& g, const VecType& v) {
    return lerp(g,v);
}
}
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images in the sequence are all the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  /*
    The combined image starts as a DirectClass clone of the first image.
  */
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /* Linear RGB when gamma is ~1.0, otherwise sRGB. */
      if (fabs(image->gamma-1.0) <= MagickEpsilon)
        (void) SetImageColorspace(combine_image,RGBColorspace);
      else
        (void) SetImageColorspace(combine_image,sRGBColorspace);
    }
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images: for each row, walk the image list and copy each list
    member's pixel intensity into the next requested channel of the result.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    /* Red channel comes from the first image in the list (if requested). */
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    /* Green channel comes from the next image in the list, and so on. */
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    /* The black (index) channel only applies to CMYK images. */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CombineImageTag,progress,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(combine_image,sRGBColorspace);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    Report whether the image's alpha (matte) channel is active.
    Consistency fix: validate the signature before touching any other
    field, matching every other entry point in this file.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  return(image->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The format of the SeparateImageChannel method is:
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
MagickExport Image *SeparateImage(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *separate_image;

  /*
    Clone the image, then reduce the clone to the requested channel as
    grayscale; on failure the clone is destroyed and NULL is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SeparateImageChannel(separate_image,channel) == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
  const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (channel == GrayChannels)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Separate image channels: rewrite each pixel in place so the selected
    channel's value is replicated into red, green, and blue (or, for
    GrayChannels, copied into alpha).  Rows are processed in parallel.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    switch (channel)
    {
      case RedChannel:
      {
        /* Replicate red into green and blue. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
        break;
      }
      case GreenChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelGreen(q));
          SetPixelBlue(q,GetPixelGreen(q));
          q++;
        }
        break;
      }
      case BlueChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelBlue(q));
          SetPixelGreen(q,GetPixelBlue(q));
          q++;
        }
        break;
      }
      case OpacityChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelOpacity(q));
          SetPixelGreen(q,GetPixelOpacity(q));
          SetPixelBlue(q,GetPixelOpacity(q));
          q++;
        }
        break;
      }
      case BlackChannel:
      {
        /* Black channel lives in the index queue; only meaningful for
           CMYK (or PseudoClass) images. */
        if ((image->storage_class != PseudoClass) &&
            (image->colorspace != CMYKColorspace))
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIndex(indexes+x));
          SetPixelGreen(q,GetPixelIndex(indexes+x));
          SetPixelBlue(q,GetPixelIndex(indexes+x));
          q++;
        }
        break;
      }
      case TrueAlphaChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelAlpha(q));
          SetPixelGreen(q,GetPixelAlpha(q));
          SetPixelBlue(q,GetPixelAlpha(q));
          q++;
        }
        break;
      }
      case GrayChannels:
      {
        /* Store the pixel intensity in the alpha channel. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        break;
      }
      default:
        break;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (channel != GrayChannels)
    {
      /* The result is grayscale; drop the matte flag accordingly. */
      image->matte=MagickFalse;
      (void) SetImageColorspace(image,GRAYColorspace);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
ExceptionInfo *exception)
{
Image
*images,
*separate_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
images=NewImageList();
if ((channel & RedChannel) != 0)
{
separate_image=CloneImage(image,0,0,MagickTrue,exception);
(void) SeparateImageChannel(separate_image,RedChannel);
AppendImageToList(&images,separate_image);
}
if ((channel & GreenChannel) != 0)
{
separate_image=CloneImage(image,0,0,MagickTrue,exception);
(void) SeparateImageChannel(separate_image,GreenChannel);
AppendImageToList(&images,separate_image);
}
if ((channel & BlueChannel) != 0)
{
separate_image=CloneImage(image,0,0,MagickTrue,exception);
(void) SeparateImageChannel(separate_image,BlueChannel);
AppendImageToList(&images,separate_image);
}
if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
{
separate_image=CloneImage(image,0,0,MagickTrue,exception);
(void) SeparateImageChannel(separate_image,BlackChannel);
AppendImageToList(&images,separate_image);
}
if ((channel & AlphaChannel) != 0)
{
separate_image=CloneImage(image,0,0,MagickTrue,exception);
(void) SeparateImageChannel(separate_image,TrueAlphaChannel);
AppendImageToList(&images,separate_image);
}
return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, Disassociate,
% DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel,
% ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and
% TransparentAlphaChannel.
%
*/
/*
  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
  channel of 'image' according to 'alpha_type'.  Errors are reported via
  image->exception; returns MagickTrue on success.
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Enable the matte channel; existing pixel data is left untouched. */
      image->matte=MagickTrue;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: pre-multiply each color channel by its
        normalized alpha, then drop the matte channel.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          gamma=QuantumScale*GetPixelAlpha(q);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case BackgroundAlphaChannel:
    {
      IndexPacket
        index;

      /* NOTE(review): this local 'status' shadows the function-level one;
         the 'return(status)' at the end of this case returns the local. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Replace fully transparent pixels with the background color. */
          if (q->opacity == TransparentOpacity)
            {
              SetPixelRed(q,pixel.red);
              SetPixelGreen(q,pixel.green);
              SetPixelBlue(q,pixel.blue);
            }
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    case ShapeAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color to
        the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Disable the matte channel; pixel data is left untouched. */
      image->matte=MagickFalse;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Un-premultiply: divide each color channel by its normalized alpha
        (via PerceptibleReciprocal to avoid division by ~0).
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image->matte=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            alpha,
            gamma;

          alpha=QuantumScale*GetPixelAlpha(q);
          gamma=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Turn the alpha channel into grayscale color, then drop matte. */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case RemoveAlphaChannel:
    case FlattenAlphaChannel:
    {
      IndexPacket
        index;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Flatten image pixels over the background pixels.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      (void) memset(&pixel,0,sizeof(pixel));
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            opacity;

          /* Standard 'over' composite of pixel over background. */
          gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
          opacity=(double) QuantumRange*(1.0-gamma);
          gamma=PerceptibleReciprocal(gamma);
          q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
            (MagickRealType) q->opacity,(MagickRealType) pixel.red,
            (MagickRealType) pixel.opacity));
          q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
            (MagickRealType) q->opacity,(MagickRealType) pixel.green,
            (MagickRealType) pixel.opacity));
          q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
            (MagickRealType) q->opacity,(MagickRealType) pixel.blue,
            (MagickRealType) pixel.opacity));
          q->opacity=ClampToQuantum(opacity);
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize opacity if the image has no matte channel yet. */
      if (image->matte == MagickFalse)
        status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  return(SyncImagePixelCache(image,&image->exception));
}
|
pack_tril.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <string.h>
#include <complex.h>
#include "config.h"
#include "np_helper.h"
/*
 * Mirror one triangle of the n x n real matrix 'mat' into the other:
 * copied as-is for hermi == HERMITIAN or SYMMETRIC, sign-flipped
 * otherwise (anti-symmetric case).  The iteration order and direction
 * are defined by the TRIU_LOOP macro from np_helper.h -- presumably it
 * walks the strict upper-triangle indices; verify against that header.
 */
void NPdsymm_triu(int n, double *mat, int hermi)
{
        size_t i, j, j0, j1;  /* j0/j1 are scratch bounds used by TRIU_LOOP */

        if (hermi == HERMITIAN || hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = mat[j*n+i];
                }
        } else {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = -mat[j*n+i];
                }
        }
}
/*
 * Mirror one triangle of the n x n complex matrix 'mat' into the other:
 * conjugated for HERMITIAN, copied for SYMMETRIC, negated-conjugate
 * otherwise (anti-Hermitian case).  Iteration order is defined by the
 * TRIU_LOOP macro from np_helper.h.
 */
void NPzhermi_triu(int n, double complex *mat, int hermi)
{
        size_t i, j, j0, j1;  /* j0/j1 are scratch bounds used by TRIU_LOOP */

        if (hermi == HERMITIAN) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = conj(mat[j*n+i]);
                }
        } else if (hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = mat[j*n+i];
                }
        } else {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = -conj(mat[j*n+i]);
                }
        }
}
void NPdunpack_tril(int n, double *tril, double *mat, int hermi)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
mat[i*n+j] = tril[ij];
}
}
if (hermi) {
NPdsymm_triu(n, mat, hermi);
}
}
/*
 * Unpack one full row 'row_id' of the symmetric matrix stored as packed
 * lower-triangle coefficients 'tril' (ndim x ndim logical size).
 * The first row_id entries lie contiguously in the packed row; the
 * remaining entries are picked off the diagonal walk tril[i(i+1)/2+row_id].
 */
void NPdunpack_row(int ndim, int row_id, double *tril, double *row)
{
        int k;
        size_t offset = (size_t)row_id * (row_id + 1) / 2;

        memcpy(row, tril + offset, sizeof(double) * row_id);
        for (k = row_id; k < ndim; k++) {
                offset += k;
                row[k] = tril[offset];
        }
}
void NPzunpack_tril(int n, double complex *tril, double complex *mat,
int hermi)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
mat[i*n+j] = tril[ij];
}
}
if (hermi) {
NPzhermi_triu(n, mat, hermi);
}
}
void NPdpack_tril(int n, double *tril, double *mat)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
tril[ij] = mat[i*n+j];
}
}
}
void NPzpack_tril(int n, double complex *tril, double complex *mat)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
tril[ij] = mat[i*n+j];
}
}
}
/* out += in[idx[:,None],idy] */
void NPdtake_2d(double *out, double *in, int *idx, int *idy,
int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
shared(out, in, idx,idy, odim, idim, nx, ny)
{
size_t i, j;
double *pin;
#pragma omp for schedule (static)
for (i = 0; i < nx; i++) {
pin = in + (size_t)idim * idx[i];
for (j = 0; j < ny; j++) {
out[i*odim+j] = pin[idy[j]];
}
}
}
}
void NPztake_2d(double complex *out, double complex *in, int *idx, int *idy,
int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
shared(out, in, idx,idy, odim, idim, nx, ny)
{
size_t i, j;
double complex *pin;
#pragma omp for schedule (static)
for (i = 0; i < nx; i++) {
pin = in + (size_t)idim * idx[i];
for (j = 0; j < ny; j++) {
out[i*odim+j] = pin[idy[j]];
}
}
}
}
/* out[idx[:,None],idy] += in */
void NPdtakebak_2d(double *out, double *in, int *idx, int *idy,
int odim, int idim, int nx, int ny, int thread_safe)
{
if (thread_safe) {
#pragma omp parallel default(none) \
shared(out, in, idx,idy, odim, idim, nx, ny)
{
size_t i, j;
double *pout;
#pragma omp for schedule (static)
for (i = 0; i < nx; i++) {
pout = out + (size_t)odim * idx[i];
for (j = 0; j < ny; j++) {
pout[idy[j]] += in[i*idim+j];
}
}
}
} else {
size_t i, j;
double *pout;
for (i = 0; i < nx; i++) {
pout = out + (size_t)odim * idx[i];
for (j = 0; j < ny; j++) {
pout[idy[j]] += in[i*idim+j];
}
}
}
}
void NPztakebak_2d(double complex *out, double complex *in, int *idx, int *idy,
int odim, int idim, int nx, int ny, int thread_safe)
{
if (thread_safe) {
#pragma omp parallel default(none) \
shared(out, in, idx,idy, odim, idim, nx, ny)
{
size_t i, j;
double complex *pout;
#pragma omp for schedule (static)
for (i = 0; i < nx; i++) {
pout = out + (size_t)odim * idx[i];
for (j = 0; j < ny; j++) {
pout[idy[j]] += in[i*idim+j];
}
}
}
} else {
size_t i, j;
double complex *pout;
for (i = 0; i < nx; i++) {
pout = out + (size_t)odim * idx[i];
for (j = 0; j < ny; j++) {
pout[idy[j]] += in[i*idim+j];
}
}
}
}
void NPdunpack_tril_2d(int count, int n, double *tril, double *mat, int hermi)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat, hermi)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPdunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
}
}
}
void NPzunpack_tril_2d(int count, int n,
double complex *tril, double complex *mat, int hermi)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat, hermi)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPzunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
}
}
}
void NPdpack_tril_2d(int count, int n, double *tril, double *mat)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPdpack_tril(n, tril+n2*ic, mat+nn*ic);
}
}
}
void NPzpack_tril_2d(int count, int n, double complex *tril, double complex *mat)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPzpack_tril(n, tril+n2*ic, mat+nn*ic);
}
}
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology.  Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative integer factorial n! (for the Binomial kernel).  Note the
   result overflows size_t for n > 20 on 64-bit platforms. */
static inline size_t fact(size_t n)
{
  size_t
    k,
    result;

  result=1;
  for (k=2; k <= n; k++)
    result*=k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Return the last kernel in a (non-empty) kernel list. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/*
  ParseKernelArray() parses one user-defined kernel definition into a
  freshly allocated KernelInfo.  Two syntaxes are accepted:
    "WxH[+X+Y][@><]:num,num,..."  -- explicit geometry plus values
    "num, num, num, ..."          -- old-style odd-sized square kernel
  Returns NULL on any parse or allocation failure.  Kept separate so it
  can also serve as a raw array-input parser (e.g. for -color-matrix).
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      /* BUGFIX: reject a geometry prefix that would not fit in 'token';
         the previous unchecked memcpy could write past the buffer. */
      if ((size_t) (p-kernel_string) >= MagickPathExtent)
        return(DestroyKernelInfo(kernel));
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      (void) GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/*
  ParseKernelName() parses one "name[:geometry]" built-in kernel
  definition, filling in per-type geometry defaults before delegating to
  AcquireKernelBuiltIn().  Returns NULL if the leading token is not a
  valid named kernel, or on any downstream failure.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  (void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* BUGFIX: reject a geometry argument that would not fit in 'token';
     the previous unchecked memcpy could write past the buffer. */
  if ((size_t) (end-p) >= MagickPathExtent)
    return((KernelInfo *) NULL);
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:       /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/*
  AcquireKernelInfo() converts a user-supplied kernel string (or, with a
  leading '@', the contents of the named file) into a linked list of
  KernelInfo structures, one per ';'-separated definition.  Returns NULL
  on any parse failure (destroying any kernels built so far); the caller
  frees the result with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MagickPathExtent];

  const char
    *p;

  /* NULL input yields a default-initialized UserDefinedKernel --
     ParseKernelArray() explicitly handles a NULL string. */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' form: read the kernel definition(s) from a file. */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  /* Peek at the next token without advancing p (NULL next-pointer);
     the comma operator makes *token the loop condition. */
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);

        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }

        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }

    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernels. These 9
% kernels should not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
ssize_t
i;
ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidOption","`%s'","Should not call this function");
return((KernelInfo *) NULL);
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /*
    Clear the pointers inherited from the shallow copy BEFORE any failure
    path can reach DestroyKernelInfo(new_kernel); otherwise a failed
    allocation below would free the ORIGINAL kernel's value array and
    next-kernel chain, leaving the caller with dangling pointers.
  */
  new_kernel->values=(MagickRealType *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list (deep copy of the chain) */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /*
    Free a kernel list: walk the 'next' chain iteratively, releasing each
    kernel's aligned value array and then the structure itself.  Always
    returns NULL so callers can write kernel=DestroyKernelInfo(kernel).
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    (void) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo(): horizontally mirror a kernel in place by reversing each
  row of the value array and mirroring the origin's x offset.

  NOTE(review): this helper is disabled (#if 0) and does not compile as
  written -- `angle` is not declared anywhere in this scope, and the loop
  condition `x<kernel->width/2` compares a ssize_t against an unsigned
  size_t expression.  Kept for reference only; confirm before enabling.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  ssize_t
    x,r;
  double
    *k,t;

  /* swap k[x] and k[r], working inward from both ends of each row */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x],  k[x]=k[r],  k[r]=t;

  kernel->x = kernel->width - kernel->x - 1; /* mirror the origin column */
  angle = fmod(angle+180.0, 360.0); /* NOTE(review): `angle` is undeclared */
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a kernel into a sequence of mirrored/rotated variants by
    appending, in order, a 180-degree flip, a 90-degree transpose, and a
    final 180-degree flop of the previous result.  On any clone failure
    the expansion simply stops, leaving the list as built so far.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *last;

  ssize_t
    j;

  last=kernel;
  for (j=0; j < 3; j++)
  {
    KernelInfo
      *clone;

    clone=CloneKernelInfo(last);
    if (clone == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(clone,angles[j]);
    LastKernelInfo(last)->next=clone;
    last=clone;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/*
  Internal helper: returns MagickTrue when the two kernels have the same
  geometry, the same origin, and (within MagickEpsilon) the same values.
  A NaN entry is considered equal only to another NaN entry.
*/
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  size_t
    count,
    n;

  /* geometry and origin must match exactly */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* element-by-element value comparison */
  count=kernel1->width*kernel1->height;
  for (n=0; n < count; n++)
  {
    /* NaN is equivalent only to NaN */
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return MagickFalse;
    /* otherwise values must agree within epsilon (NaN==NaN falls
       through here because the fabs() comparison is false for NaN) */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  /*
    Append successively rotated clones of the kernel to the list until a
    rotation reproduces the original kernel (full circle), or a clone
    allocation fails.
  */
  KernelInfo
    *rotated,
    *tail;

  rotated=(KernelInfo *) NULL;
  tail=kernel;
  DisableMSCWarning(4127)
  for ( ; ; ) {
    RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* rotation has come full circle */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated); /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /*
    Recompute the kernel's meta-data (minimum, maximum, and the
    positive/negative value ranges) directly from its values.  Values
    smaller in magnitude than MagickEpsilon are snapped to exactly zero
    as a side effect.
  */
  size_t
    n;

  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      value;

    value=kernel->values[n];
    if ( fabs(value) < MagickEpsilon )
      kernel->values[n]=value=0.0;
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies one low-level morphology primitive
  (convolve, erode, dilate, the intensity variants, hit-and-miss,
  thinning, thicken, or iterative-distance) of 'kernel' to 'image',
  writing the result into the pre-existing 'morphology_image'.

  Returns the number of pixel-channel values changed by the operation.
  NOTE(review): on a cache-view failure the special 1-D vertical path
  returns 0 while the general path returns -1 — confirm callers treat
  both as failure.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* virtual-pixel row width covers the kernel overhang on both sides */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      /* kernel applied un-reflected: offset is the kernel origin */
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "InvalidOption","`%s'","Not a Primitive Morphology Method");
      break;
    }
  }
  changed=0;
  /* one change counter per OpenMP thread, summed at the end to avoid
     contention inside the parallel loops */
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels. This performs
        its handling in columns rather than in rows. This is only done
        for convolve as it is the only method that generates very large 1-D
        vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* read one column, extended vertically by the kernel height */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* offset (in Quantum units) of the source pixel under the origin */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            const MagickRealType
              *magick_restrict k;

            const Quantum
              *magick_restrict pixels;

            ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* channel is pass-through: copy source value unchanged */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* walk the kernel from its last value backwards (reflection) */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=1.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              /* no alpha blending: plain weighted sum, skipping NaNs */
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              {
                /* alpha-weighted sum; gamma accumulates the total weight
                   used later to renormalize */
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  if (!IsNaN(*k))
                    {
                      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                      pixel+=alpha*(*k)*pixels[i];
                      gamma+=alpha*(*k);
                      count++;
                    }
                  k--;
                  pixels+=GetPixelChannels(image);
                }
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              /* compensate for NaN (skipped) kernel entries */
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            /* NOTE(review): progress total is image->rows although this
               loop iterates over columns — confirm intended */
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset (in Quantum units) of the source pixel under the kernel
       origin within the extended row buffer */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels,
          *magick_restrict quantum_pixels;

        ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            /* channel is pass-through: copy source value unchanged */
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        quantum_pixels=(const Quantum *) NULL;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* seed the accumulator per method (identity of min/max/sum) */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.

              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' that Convolution. However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.

              Correlation will have its kernel reflected before calling this
              function to do a Convolve.

              For more details of Correlation vs Convolution see
              http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  /* advance to the same x position on the next row */
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation. In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maximum of background pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.

              This never produces a meaningless negative result. Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground entry: track minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background entry: track maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method == ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method == ThickenMorphology)
                /* NOTE(review): '+=' adds the hit-and-miss result twice,
                   asymmetric with the Thinning branch above — confirm
                   intended */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        /* remember whole pixel so all channels copy from
                           the same source pixel */
                        quantum_pixels=pixels;
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        quantum_pixels=pixels;
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from black edge of a white image
              shape. Essentially white values are decreased to the smallest
              'distance from edge' it can find.

              It works by adding kernel values to the neighbourhood, and
              select the minimum value found. The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.

              This code is nearly identical to True GrayScale Morphology but
              not quite.

              GreyDilate Kernel values added, maximum value found Kernel is
              rotated before use.

              GrayErode: Kernel values subtracted and minimum value found No
              kernel rotation used.

              Note the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        if (quantum_pixels != (const Quantum *) NULL)
          {
            /* intensity-select methods copy the chosen pixel's channel
               directly, bypassing gamma scaling */
            SetPixelChannel(morphology_image,channel,quantum_pixels[i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          /* compensate for NaN (skipped) kernel entries */
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once in
  each direction, with the results of the previous (and current) row being
  re-used.

  That is after each row is 'Sync'ed' into the image, the next row makes use of
  those values as part of the calculation of the next row.  It repeats, but
  going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function can not make use of
  multi-threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a distance-style primitive
  (Distance or Voronoi) directly to 'image' in place, using two passes:
  top-down then bottom-up, each re-using results already written earlier
  in the same pass.  Returns the number of pixel-channel values changed,
  or -1 on failure.  Deliberately single-threaded: each row depends on
  the rows processed before it.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  /* virtual-pixel row width covers the kernel overhang on both sides */
  width=image->columns+kernel->width-1;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We read
      using virtual to get virtual pixel handling, but write back into the same
      image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows above and including the current one, via virtual view */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* already-updated pixels to the left, read back from q */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): loops v < offset.y (excludes the current row)
               unlike Distance's v <= offset.y — confirm intended */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* total is 2*rows because a second (reverse) pass follows */
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the bottom half of the kernel is processed as we go up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* this pass walks each row right-to-left */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows at and below the current one */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* already-updated pixels to the right, read back from q */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* NOTE(review): this k start index differs from the Distance
               case's (kernel->width*kernel->y+kernel->x-1) — confirm
               intended */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
  Apply a Morphology by calling one of the above low-level primitive
  application functions.  This function handles any iteration loops,
  composition or re-iteration of results, and compound morphology methods
  that are based on multiple low-level (staged) morphology methods.

  Basically this provides the complex glue between the requested morphology
  method and the raw low-level implementation (above).
*/
/*
  MorphologyApply() applies a primitive or compound morphology method to an
  image, handling all iteration loops, multi-kernel composition, and the
  staging of compound methods into low-level primitives.

  Returns a newly allocated result image (caller owns it), or NULL on error
  or for a null operation (iterations == 0).

  Fix applied: removed the redundant doubled "(void) (void)" casts on two
  FormatLocaleFile() calls (verbose reporting paths).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
             method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            /* fixed: was a doubled "(void) (void)" cast */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup a lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose_op;

  const char
    *option;

  double
    convolve_bias;

  Image
    *result;

  KernelInfo
    *active_kernel;

  /* Start with the caller's kernel and the method's default settings. */
  active_kernel=(KernelInfo *) kernel;
  convolve_bias=0.0;
  compose_op=UndefinedCompositeOp;  /* use default for method */

  /*
    Apply Convolve/Correlate normalization and scaling factors BEFORE any
    ShowKernelInfo() output, so users can see the results of the
    'convolve:scale' option.
  */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* user supplied output bias */
      option=GetImageArtifact(image,"convolve:bias");
      if (option != (const char *) NULL)
        {
          if (IsGeometry(option) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:bias",
              option);
          else
            convolve_bias=StringToDoubleInterval(option,(double) QuantumRange+
              1.0);
        }
      /* user requested kernel scaling/normalization */
      option=GetImageArtifact(image,"convolve:scale");
      if (option != (const char *) NULL)
        {
          if (IsGeometry(option) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:scale",
              option);
          else
            {
              /* clone the kernel before modifying it (caller owns original) */
              if (active_kernel == kernel)
                active_kernel=CloneKernelInfo(kernel);
              if (active_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(active_kernel,option);
            }
        }
    }

  /* display the (normalized) kernel via stderr */
  option=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(option) != MagickFalse)
    ShowKernelInfo(active_kernel);

  /*
    Override the default handling of multi-kernel morphology results:
      'Undefined' => use the default method,
      'None'      => re-iterate the previous result,
      otherwise   => merge resulting images using the compose method given.
  */
  option=GetImageArtifact(image,"morphology:compose");
  if (option != (const char *) NULL)
    {
      ssize_t
        parsed;

      parsed=ParseCommandOption(MagickComposeOptions,MagickFalse,option);
      if (parsed < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
          "UnrecognizedComposeOperator","'%s' '%s'","morphology:compose",
          option);
      else
        compose_op=(CompositeOperator) parsed;
    }

  /* Apply the morphology, then release any cloned kernel. */
  result=MorphologyApply(image,method,iterations,active_kernel,compose_op,
    convolve_bias,exception);
  if (active_kernel != kernel)
    active_kernel=DestroyKernelInfo(active_kernel);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/*
  Rotate the kernel (and every kernel after it in a multi-kernel list) by
  the given angle in degrees.  Only multiples of 45 degrees (3x3 kernels)
  and 90 degrees (1D or square kernels) are actually performed; built-in
  symmetric kernels are left untouched.  Internal helper only.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* recurse: rotate the rest of the multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Reduce the angle into the [0, 360) range */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
          ** cycle the 8 edge values one position counter-clockwise,
          ** leaving the center value (index 4) in place.
          */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin (moves along the kernel edge) */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* NOTE(review): perror() reports stale errno text here; an exception
           would be more appropriate - kept as is */
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* which direction the transpose rotated depends on the new shape */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees
          ** (4-way cycle of symmetric positions, ring by ring)
          */
          { ssize_t
              i,j,x,y;

            MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        /* NOTE(review): see perror note above */
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      MagickRealType
        *k;

      ssize_t
        i,
        j;

      /* reverse the value array in place */
      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      /* reflect the origin through the kernel center */
      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    status;

  /* Parse the user-supplied geometry argument (e.g. "50%!" or "2,0.5"). */
  SetGeometryInfo(&geometry_info);
  status=ParseGeometry(geometry,&geometry_info);
  if ((status & PercentValue) != 0)
    {
      /* percentage flag given: convert both arguments to plain factors */
      geometry_info.rho*=0.01;
      geometry_info.sigma*=0.01;
    }
  if ((status & RhoValue) == 0)      /* default scaling factor */
    geometry_info.rho=1.0;
  if ((status & SigmaValue) == 0)    /* default unity-kernel amount */
    geometry_info.sigma=0.0;
  /* Scale/normalize the input kernel (flags select the method). */
  ScaleKernelInfo(kernel,geometry_info.rho,(GeometryFlags) status);
  /* Add a scaled unity kernel, for blending with the original image. */
  if ((status & SigmaValue) != 0)
    UnityAddKernelInfo(kernel,geometry_info.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
%  o scaling_factor:
%         multiply all values (after any normalization) by this factor.
%         When a normalize flag is given, the kernel is normalized first,
%         then scaled by this factor.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/*
  Scale (and optionally normalize) every kernel in a multi-kernel list by
  the given factor.  NormalizeValue normalizes by the (absolute) value sum;
  CorrelateNormalizeValue scales positive and negative values separately to
  force a zero-summing kernel.  Also updates the cached output range and
  min/max kernel values.

  Fix applied: when a negative scaling factor swaps maximum/minimum, the
  saved old maximum 't' is now assigned to kernel->minimum (was a literal
  '1', which corrupted the kernel's recorded minimum).
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  double
    pos_scale,
    neg_scale;

  ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                 ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                 ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale each value by the sign-appropriate factor (skip NaN holes) */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* FIX: was '1', discarding the saved maximum */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *member;

  size_t
    cell,
    column,
    number,
    row;

  /* Describe each kernel of the (possibly multi-kernel) list to stderr. */
  number=0;
  member=kernel;
  while (member != (const KernelInfo *) NULL)
  {
    (void) FormatLocaleFile(stderr, "Kernel");
    /* number the kernels only when the list contains more than one */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, member->type) );
    if ( fabs(member->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", member->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",
      (unsigned long) member->width,(unsigned long) member->height,
      (long) member->x,(long) member->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), member->minimum,
          GetMagickPrecision(), member->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), member->negative_range,
          GetMagickPrecision(), member->positive_range);
    /* classify the kernel by its value sum */
    if ( fabs(member->positive_range+member->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(member->positive_range+member->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), member->positive_range+member->negative_range);
    /* dump the kernel values, one row per output line */
    cell=0;
    for (row=0; row < member->height; row++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (column=0; column < member->width; column++, cell++)
        if (IsNaN(member->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
            GetMagickPrecision(), (double) member->values[cell]);
      (void) FormatLocaleFile(stderr,"\n");
    }
    number++;
    member=member->next;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
/*
 * Add 'scale' times the unity (identity) kernel to every kernel in the
 * list: the value at each kernel's origin is increased by 'scale', which in
 * effect blends that fraction of the original image into the convolution
 * result. The kernel meta-data is recalculated afterwards.
 */
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* iterate instead of recursing over the multi-kernel list */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k); /* keep range/min/max meta-data consistent */
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/*
 * Replace every 'nan' placeholder value with zero, in each kernel of the
 * (possibly multi-kernel) list — e.g. before handing the kernel to GPU
 * convolution code that cannot handle NaNs.
 */
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    n;

  /* iterate instead of recursing over the multi-kernel list */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (n=0; n < (k->width*k->height); n++)
      if (IsNaN(k->values[n]))
        k->values[n]=0.0;
}
|
VecSum_openMP+AVX.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <immintrin.h>
/* Print the first l elements of vec to stdout, space-separated, newline-terminated. */
void print_vec(int* vec, int l) {
    int idx = 0;
    while (idx < l) {
        printf("%d ", vec[idx]);
        idx++;
    }
    printf("\n");
}
/*
 * Entry point: read a vector length from stdin, fill two vectors with
 * random ints, and sum them into a third using AVX2 (8 ints per __m256i)
 * inside an OpenMP parallel loop. argv[1] optionally gives the OpenMP
 * thread count (default 1). Returns 0 on success, 1 on bad input or
 * allocation failure.
 */
int main(int argc, char* argv[]) {
    int threadNum = 1;
#ifdef _OPENMP
    /* Guard against a missing argv[1]: the original indexed it blindly. */
    if (argc > 1) {
        long requested = strtol(argv[1], NULL, 10);
        if (requested > 0)
            threadNum = (int)requested;
    }
#endif

    int vecLength;
    printf("Enter length of the summed vector:");
    /* scanf_s is MSVC-specific; checked scanf is portable. */
    if (scanf("%d", &vecLength) != 1 || vecLength <= 0) {
        printf("Invalid vector length.\n");
        return 1;
    }

    int *vec1 = (int*)malloc(vecLength * sizeof(int));
    int *vec2 = (int*)malloc(vecLength * sizeof(int));
    int *vec3 = (int*)malloc(vecLength * sizeof(int));
    if (vec1 == NULL || vec2 == NULL || vec3 == NULL) {
        printf("Memory allocation failed.\n");
        free(vec1);
        free(vec2);
        free(vec3);
        return 1; /* failure must not report success (original returned 0) */
    }

    srand((unsigned int)time(NULL));
    for (int j = 0; j < vecLength; j++) {
        vec1[j] = rand();
        vec2[j] = rand();
    }
    print_vec(vec1, vecLength);
    print_vec(vec2, vecLength);

    double startTime = omp_get_wtime();
    int i;
#ifdef _OPENMP
#pragma omp parallel for num_threads(threadNum) \
default(none) shared(vecLength, vec1, vec2, vec3) private(i)
    for (i = 0; i < vecLength / 8; i++) {
        /* malloc() gives no 32-byte alignment guarantee, so the unaligned
         * load/store intrinsics must be used (the aligned forms can fault).
         * The original also passed a raw int* to _mm256_store_si256. */
        __m256i a = _mm256_loadu_si256((const __m256i*)(vec1 + i * 8));
        __m256i b = _mm256_loadu_si256((const __m256i*)(vec2 + i * 8));
        _mm256_storeu_si256((__m256i*)(vec3 + i * 8), _mm256_add_epi32(a, b));
    }
    i = (vecLength / 8) * 8;
#else
    /* Without OpenMP the original skipped elements 0..8*(n/8)-1 entirely;
     * start the scalar loop at 0 so the whole vector is still summed. */
    i = 0;
#endif
    /* Scalar tail (or full scalar path when OpenMP is disabled). */
    for (; i < vecLength; i++)
        vec3[i] = vec1[i] + vec2[i];
    double endTime = omp_get_wtime();

    print_vec(vec3, vecLength);
    printf("openMP+AVX test completed in: %lf\n", endTime - startTime);

    free(vec1);
    free(vec2);
    free(vec3);
    return 0;
}
main.c | #include "stdafx.h"
#include "intrin.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <pthread.h>
#include <jansson.h>
//#include <atomic.h>
// Link with ws2_32.lib
#pragma comment(lib, "Ws2_32.lib")
#pragma comment(lib, "CRYPT32.LIB")
#ifdef __x86_64__
//#include <cpuid.h>
#endif
#ifdef __linux__
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sched.h>
#else
#include <winsock2.h>
#undef __cpuid
#endif
#include <CL/cl.h>
#include "cryptonight.h"
#include "minerutils.h"
#include "minerlog.h"
#include "minernet.h"
#include "stratum.h"
#include "miner.h"
#include "ocl.h"
#include "openssl/ssl.h"
#include "openssl/err.h"
#include "openssl/opensslconf.h"
#define STRATUM_TIMEOUT_SECONDS 120
// I know, it's lazy.
#define STRATUM_MAX_MESSAGE_LEN_BYTES 8192
#ifndef OPENSSL_THREADS
#define THISISATEST 0
#else
#define THISISATEST 1
#endif
// Aggregate mining statistics shared across threads; guarded by StatusMutex.
typedef struct _StatusInfo
{
uint64_t SolvedWork;      // shares submitted to the pool (incremented in PoolBroadcastThreadProc)
uint64_t RejectedWork;    // presumably shares rejected by the pool — not updated in this chunk
double *ThreadHashCounts; // per-miner-thread counters — written elsewhere; verify against miner threads
double *ThreadTimes;      // per-miner-thread timings — written elsewhere; verify against miner threads
} StatusInfo;
// Serializes all access to GlobalStatus.
pthread_mutex_t StatusMutex = PTHREAD_MUTEX_INITIALIZER;
StatusInfo GlobalStatus;
// CPU cryptonight implementation used to (re)compute share hashes host-side.
static cryptonight_func *cryptonight_hash_ctx;
// Login credentials for one pool worker; NextWorker chains extra workers.
typedef struct _WorkerInfo
{
char *User;
char *Pass;
struct _WorkerInfo *NextWorker;
} WorkerInfo;
// Per-pool connection state shared between the stratum and miner threads.
typedef struct _PoolInfo
{
SOCKET sockfd;
char *PoolName;
char *StrippedURL;  // pool host without scheme/port (see ConnectToPool usage)
char *Port;
WorkerInfo WorkerData;
uint32_t MinerThreadCount;
uint32_t *MinerThreads;
//atomic_uint StratumID;
// NOTE(review): '#pragma omp atomic' only applies to expression statements,
// not declarations — it has no effect here. The commented-out atomic_uint
// above suggests StratumID was meant to be a C11 atomic (<stdatomic.h>).
#pragma omp atomic
uint32_t StratumID;
char XMRAuthID[64];  // auth/session id echoed back in share submissions
SSL *ssl;
} PoolInfo;
// NOTE(review): as with StratumID above, this '#pragma omp atomic' has no
// effect on a declaration. RestartMining is later passed to atomic_store(),
// which requires an _Atomic object — TODO confirm the intended type.
#pragma omp atomic
bool *RestartMining;  // per-miner-thread restart flags (see RestartMiners)
bool ExitFlag = false;  // presumably a global shutdown request — not read in this chunk
int ExitPipe[2];
JobInfo Jobs[2];  // double-buffered job storage
volatile JobInfo *CurrentJob;  // job the miner threads should currently work on
volatile int JobIdx;  // index into Jobs[] of the current job
// A solved share waiting to be submitted to the pool.
typedef struct _Share
{
struct _Share *next;
JobInfo *Job;      // job this share was found for
uint32_t Nonce;    // winning nonce
int Gothash;       // nonzero when Blob already holds the final hash
uint8_t Blob[32];  // precomputed hash, valid only when Gothash is set
} Share;
// Singly-linked FIFO of shares pending submission.
typedef struct _ShareQueue
{
Share *first;
Share *last;
} ShareQueue;
// Free list of Share nodes recycled by GetShare()/FreeShare().
Share *ShareList;
/*
 * Pop a Share node off the global free list, or heap-allocate a fresh one
 * when the list is empty. No internal locking; callers serialize access.
 */
Share *GetShare()
{
	Share *node = ShareList;

	if (node != NULL)
		ShareList = node->next;
	else
		node = (Share *)malloc(sizeof(Share));

	return node;
}
/* Append NewShare to the tail of the FIFO queue (no internal locking). */
void SubmitShare(ShareQueue *queue, Share *NewShare)
{
	NewShare->next = NULL;

	if (queue->first == NULL) {
		queue->first = NewShare;
		queue->last = NewShare;
	} else {
		queue->last->next = NewShare;
		queue->last = NewShare;
	}
}
/* Pop and return the head of the queue, or NULL when it is empty. */
Share *RemoveShare(ShareQueue *queue)
{
	Share *head = queue->first;

	if (head != NULL)
		queue->first = head->next;

	return head;
}
/* Return a Share node to the global free list for reuse by GetShare(). */
void FreeShare(Share *share)
{
	Share *oldhead = ShareList;

	share->next = oldhead;
	ShareList = share;
}
// Queue of shares awaiting submission; guarded by QueueMutex and signalled
// via QueueCond (consumed in PoolBroadcastThreadProc).
ShareQueue CurrentQueue;
pthread_mutex_t QueueMutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t QueueCond = PTHREAD_COND_INITIALIZER;
// Socket plus worker credentials handed to a pool broadcast thread.
typedef struct _PoolBroadcastInfo
{
int poolsocket;
WorkerInfo WorkerData;
} PoolBroadcastInfo;
/*
 * Send exactly len bytes from buf over fd, looping over partial sends.
 * Returns 0 on success, -1 on socket error or non-positive len.
 */
int sendit(int fd, char *buf, int len)
{
	int rc;

	/* The original fell through to 'return rc' with rc uninitialized when
	 * len <= 0 (undefined behavior); treat it as an error explicitly. */
	if (len <= 0)
		return -1;

	do
	{
		rc = send(fd, buf, len, 0);
		if (rc == -1)
			return rc;
		/* advance past the bytes already accepted by the kernel */
		buf += rc;
		len -= rc;
	} while (len > 0);

	return rc < 1 ? -1 : 0;
}
/*
 * Create and configure an OpenSSL client context: legacy library init,
 * SSLv2/SSLv3/TLSv1 disabled (TLSv1.1+ negotiated), and moving write
 * buffers tolerated. Aborts the process if the context cannot be created.
 */
SSL_CTX* InitCTX()
{ const SSL_METHOD *method;
SSL_CTX *ctx;
SSL_library_init();
OpenSSL_add_all_algorithms(); /* Load cryptos, et.al. */
SSL_load_error_strings(); /* Bring in and register error messages */
method = SSLv23_client_method(); /* Create new client-method instance */
ctx = SSL_CTX_new(method); /* Create new context */
if ( ctx == NULL )
{
ERR_print_errors_fp(stderr);
abort();
}
/* Disable legacy protocol versions; the best remaining one is negotiated. */
SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1);
/* Allow the write buffer address to change between SSL_write() retries. */
SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
return ctx;
}
/*
 * Log the most recent OpenSSL error from this thread's error queue.
 */
void print_ssl_error() {
	unsigned long errcode;
	char errstring[256];

	errcode = ERR_get_error();
	/* Use the length-checked variant so the buffer size is explicit. */
	ERR_error_string_n(errcode, errstring, sizeof(errstring));
	/* Pass the text as an argument, never as the format string itself:
	 * the original Log(LOG_CRITICAL, errstring) would misinterpret any
	 * '%' sequences in the error text (format-string bug). */
	Log(LOG_CRITICAL, "%s", errstring);
}
/*
 * Write exactly len bytes to the TLS connection, looping over partial
 * writes. Returns 0 on success, -1 on error (logging the OpenSSL reason).
 * sockfd is unused but kept for interface compatibility with callers.
 */
int ssl_sendit(SSL *ssl, char *buf, int len, int sockfd)
{
	int rc;

	(void)sockfd;

	/* The original returned an uninitialized rc when len <= 0. */
	if (len <= 0)
		return -1;

	do
	{
		rc = SSL_write(ssl, buf, len);
		/* SSL_write() signals failure with 0 or any negative value, not
		 * just -1; the original looped forever on rc == 0. */
		if (rc <= 0) {
			print_ssl_error();
			return -1;
		}
		buf += rc;
		len -= rc;
	} while (len > 0);

	return 0;
}
/*
 * Probe/write helper: simply forwards to ssl_sendit() and returns its
 * result (0 on success, -1 on error). Callers use it to check whether the
 * TLS connection is still writable.
 */
int ssl_test(SSL *ssl, char *buf, int len, int sockfd)
{
	return ssl_sendit(ssl, buf, len, sockfd);
}
/*
 * Single-shot pump over the TLS connection: attempts to read one SSL record
 * into rawresponse and returns its length, or returns -1 when nothing was
 * read. The write half is effectively dead code here: c2s[] is never filled
 * before SSL_write() is attempted, and the enclosing while(1) loop (and the
 * select() calls) are commented out, so the fd_sets are consulted right
 * after FD_SET without a real readiness poll.
 * NOTE(review): shutdown_wait, ofcmode, s2c, width, timeout and
 * PartialMessageOffset are unused in the current state of the code.
 */
int read_write(SSL *ssl, int sock, char *rawresponse, size_t *PartialMessageOffset)
{
//Log(LOG_CRITICAL, "Inside read_write");
int width;
int r,c2sl=0,c2s_offset=0;
int read_blocked_on_write=0,write_blocked_on_read=0,read_blocked=0;
fd_set readfds,writefds;
int shutdown_wait=0;
char c2s[STRATUM_MAX_MESSAGE_LEN_BYTES],s2c[STRATUM_MAX_MESSAGE_LEN_BYTES];
int ofcmode;
struct timeval timeout;
/*First we make the socket nonblocking*/
//SetNonBlockingSocket(sock);
width=sock+1;
//while(1){
FD_ZERO(&readfds);
FD_ZERO(&writefds);
FD_SET(sock,&readfds);
/* If we're waiting for a read on the socket don't
try to write to the server */
if(!write_blocked_on_read){
/* If we have data in the write queue don't try to
read from stdin */
if(c2sl || read_blocked_on_write)
FD_SET(sock,&writefds);
else
FD_SET(sock,&readfds);
}
timeout.tv_sec = 1;
timeout.tv_usec = 0;
//Log(LOG_CRITICAL, "Before read select");
// r=select(width,&readfds,&writefds,0,&timeout);
//if(r==0) {
//Log(LOG_CRITICAL, "Select returned 0");
//continue;
//}
//if (r < 0) {
//Log(LOG_CRITICAL, "Select had an error");
//}
//if (r > 0) {
//Log(LOG_CRITICAL, "Select returned something");
//}
r = SSL_pending(ssl);
/* Now check if there's data to read */
if((FD_ISSET(sock,&readfds) && !write_blocked_on_read) ||
(read_blocked_on_write && FD_ISSET(sock,&writefds))){
//Log(LOG_CRITICAL, "There is data to read");
do {
read_blocked_on_write=0;
read_blocked=0;
//Log(LOG_CRITICAL, "Before ssl read");
r=SSL_read(ssl, rawresponse, STRATUM_MAX_MESSAGE_LEN_BYTES);
switch(SSL_get_error(ssl,r)){
case SSL_ERROR_NONE:
// Log(LOG_CRITICAL, "ERROR NONE ON SSL READ");
/* Note: this call could block, which blocks the
entire application. It's arguable this is the
right behavior since this is essentially a terminal
client. However, in some other applications you
would have to prevent this condition */
//fwrite(s2c,1,r,stdout);
/* Successful read: hand the byte count straight back to the caller. */
return r;
break;
case SSL_ERROR_ZERO_RETURN:
//Log(LOG_CRITICAL, "ZERO RETURN ON SSL READ");
/* End of data */
break;
case SSL_ERROR_WANT_READ:
Log(LOG_CRITICAL, "WANT READ ON SSL READ");
read_blocked=1;
break;
/* We get a WANT_WRITE if we're
trying to rehandshake and we block on
a write during that rehandshake.
We need to wait on the socket to be
writeable but reinitiate the read
when it is */
case SSL_ERROR_WANT_WRITE:
Log(LOG_CRITICAL, "WANT WRITE ON SSL READ");
read_blocked_on_write=1;
break;
default:
Log(LOG_CRITICAL, "SSL read problem");
print_ssl_error();
}
/* We need a check for read_blocked here because
SSL_pending() doesn't work properly during the
handshake. This check prevents a busy-wait
loop around SSL_read() */
} while (SSL_pending(ssl) && !read_blocked);
}
c2sl = STRATUM_MAX_MESSAGE_LEN_BYTES;
//Log(LOG_CRITICAL, "Before writes select");
//r=select(width,NULL,&writefds,0,&timeout);
//if(r==0) {
//Log(LOG_CRITICAL, "Select returned 0");
//continue;
//}
//if (r < 0) {
//Log(LOG_CRITICAL, "Select had an error");
//}
//if (r > 0) {
//Log(LOG_CRITICAL, "Select returned something");
//}
/* If the socket is writeable... */
//Log(LOG_CRITICAL, "Check if socket is writable");
if((FD_ISSET(sock,&writefds) && c2sl) ||
(write_blocked_on_read && FD_ISSET(sock,&readfds))) {
write_blocked_on_read=0;
/* Try to write */
/* NOTE(review): "%z" is not a complete printf conversion ("%zu"/"%d"
   was probably intended), and c2s is read here uninitialized. */
Log(LOG_CRITICAL, "Sending %s with length %z", c2s, c2sl);
if (c2s[0] == '\0') {
Log(LOG_CRITICAL, "It IS EMPTY");
}
r=SSL_write(ssl,c2s+c2s_offset,c2sl);
switch(SSL_get_error(ssl,r)){
/* We wrote something*/
case SSL_ERROR_NONE:
Log(LOG_CRITICAL, "ERROR NONE ON SSL WRITE");
c2sl-=r;
c2s_offset+=r;
break;
/* We would have blocked */
case SSL_ERROR_WANT_WRITE:
Log(LOG_CRITICAL, "WANT WRITE ON SSL WRITE");
break;
/* We get a WANT_READ if we're
trying to rehandshake and we block on
write during the current connection.
We need to wait on the socket to be readable
but reinitiate our write when it is */
case SSL_ERROR_WANT_READ:
Log(LOG_CRITICAL, "WANT READ ON SSL WRITE");
write_blocked_on_read=1;
break;
/* Some other error */
default:
Log(LOG_CRITICAL, "SSL write problem");
print_ssl_error();
}
}
//}
/* NOTE(review): this label is never targeted by a goto; it is left over
   from the commented-out while(1) loop above. */
end:
//Log(LOG_CRITICAL, "read_write end");
//SSL_free(ssl);
//close(sock);
return -1;
}
// Raw network buffer size (not referenced in this function).
#define BIG_BUF_LEN 262144
// Size of the JSON scratch buffer used for stratum requests.
#define JSON_BUF_LEN 345
/*
 * Pool submission thread: blocks on QueueCond for shares queued in
 * CurrentQueue, recomputes the cryptonight hash host-side for shares that
 * lack one, formats the stratum "submit" JSON and sends it over the pool's
 * TLS connection. QueueMutex is held for the whole loop and released only
 * inside pthread_cond_wait(); the trailing unlock/return are effectively
 * unreachable since the for(;;) never breaks.
 */
void *PoolBroadcastThreadProc(void *Info)
{
Log(LOG_CRITICAL, "Inside pool broadcastthreadproc");
uint64_t id = 10;
PoolInfo *pbinfo = (PoolInfo *)Info;
char s[JSON_BUF_LEN];
// NOTE(review): c_ctx is allocated but never used or freed — each hash
// below creates a fresh context instead (see the /*c_ctx*/ marker).
void *c_ctx = cryptonight_ctx();
pthread_mutex_lock(&QueueMutex);
for(;;)
{
Log(LOG_CRITICAL, "Inside the pool broadcast first for loop");
pthread_cond_wait(&QueueCond, &QueueMutex);
// Drain the whole queue each time we are woken.
for(Share *CurShare = RemoveShare(&CurrentQueue); CurShare; CurShare = RemoveShare(&CurrentQueue))
{
Log(LOG_CRITICAL, "Inside the pool broadcast second for loop");
char ASCIINonce[9], ASCIIResult[65];
uint8_t HashResult[32];
int ret, len;
BinaryToASCIIHex(ASCIINonce, &CurShare->Nonce, 4U);
if (!CurShare->Gothash) {
// Patch the 4-byte nonce into the blob at offset 39, then recompute
// the hash host-side to fill the "result" field.
((uint32_t *)(CurShare->Job->XMRBlob + 39))[0] = CurShare->Nonce;
// Blob version byte >= 7 selects PoW variant (version - 6).
int variant = ((uint8_t*)CurShare->Job->XMRBlob)[0] >= 7 ? ((uint8_t*)CurShare->Job->XMRBlob)[0] - 6 : 0;
cryptonight_hash_ctx(HashResult, CurShare->Job->XMRBlob, CurShare->Job->XMRBlobLen, cryptonight_ctx() /*c_ctx*/, variant);
BinaryToASCIIHex(ASCIIResult, HashResult, 32);
} else {
// The miner already produced the final hash; use it directly.
BinaryToASCIIHex(ASCIIResult, CurShare->Blob, 32);
}
len = snprintf(s, JSON_BUF_LEN,
"{\"method\": \"submit\", \"params\": {\"id\": \"%s\", "
"\"job_id\": \"%s\", \"nonce\": \"%s\", \"result\": \"%s\"}, "
"\"id\":1}\r\n\n",
pbinfo->XMRAuthID, CurShare->Job->ID, ASCIINonce, ASCIIResult);
FreeShare(CurShare);
pthread_mutex_lock(&StatusMutex);
GlobalStatus.SolvedWork++;
pthread_mutex_unlock(&StatusMutex);
Log(LOG_NETDEBUG, "Request: %s", s);
Log(LOG_CRITICAL, "Request: %s", s);
ret = ssl_sendit(pbinfo->ssl, s, len, pbinfo->sockfd);
if (ret == -1) {
Log(LOG_CRITICAL, "Unable to send share");
print_ssl_error();
break;
}
}
}
pthread_mutex_unlock(&QueueMutex);
// free(c_ctx);
return(NULL);
}
/*
 * Upload the input blob and bind all kernel arguments for one cryptonight
 * pass: cn0 (init), cn1 (main loop, takes the PoW variant), cn2 (finalize
 * and branch routing) and the four finalizer kernels (Blake, Groestl, JH,
 * Skein — see KernelNames in SetupXMRTest). Returns ERR_SUCCESS,
 * ERR_STUPID_PARAMS on bad arguments, or ERR_OCL_API on OpenCL failure.
 */
int32_t XMRSetKernelArgs(AlgoContext *HashData, void *HashInput, uint64_t Target)
{
	cl_int retval;

	/* Validate before any dereference: the original read HashData->GlobalSize
	 * and HashData->WorkSize (into unused locals) ahead of this NULL check. */
	if(!HashData || !HashInput || HashData->InputLen < 1) return(ERR_STUPID_PARAMS);

	/* Blob version byte selects the PoW variant (>= 7 maps to version - 6). */
	const uint8_t version = ((const uint8_t*)HashInput)[0];
	int variant = version >= 7 ? version - 6 : 0;

	retval = clEnqueueWriteBuffer(*HashData->CommandQueues, HashData->InputBuffer, CL_TRUE, 0, HashData->InputLen, HashInput, 0, NULL, NULL);

	fprintf(stderr, "VARIANT, VER %i, %i", variant, version);

	if(retval != CL_SUCCESS)
	{
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueWriteBuffer to fill input buffer.", retval);
		return(ERR_OCL_API);
	}

	/* Set one kernel argument, bailing out of the function on failure. */
	#define CL_SET_ARG(k, n, s, v) do { \
		retval = clSetKernelArg(HashData->Kernels[k], n, s, v); \
		if(retval != CL_SUCCESS) { \
			Log(LOG_CRITICAL, "Error %d when calling clSetKernelArg for kernel %d argument %d.", retval, k, n); \
			return(ERR_OCL_API); \
		} } while(0)

	// CN0 kernel: input blob, input length, scratchpads, states
	CL_SET_ARG(0, 0, sizeof(cl_mem), &HashData->InputBuffer);
	CL_SET_ARG(0, 1, sizeof(cl_int), &HashData->InputLen);
	CL_SET_ARG(0, 2, sizeof(cl_mem), HashData->ExtraBuffers + 0);
	CL_SET_ARG(0, 3, sizeof(cl_mem), HashData->ExtraBuffers + 1);

	// CN1 kernel: scratchpads, states, PoW variant, input blob
	CL_SET_ARG(1, 0, sizeof(cl_mem), HashData->ExtraBuffers + 0);
	CL_SET_ARG(1, 1, sizeof(cl_mem), HashData->ExtraBuffers + 1);
	CL_SET_ARG(1, 2, sizeof(cl_int), &variant);
	CL_SET_ARG(1, 3, sizeof(cl_mem), &HashData->InputBuffer);

	// CN2 kernel: scratchpads, states, and the four branch nonce buffers
	CL_SET_ARG(2, 0, sizeof(cl_mem), HashData->ExtraBuffers + 0);
	CL_SET_ARG(2, 1, sizeof(cl_mem), HashData->ExtraBuffers + 1);
	CL_SET_ARG(2, 2, sizeof(cl_mem), HashData->ExtraBuffers + 2);
	CL_SET_ARG(2, 3, sizeof(cl_mem), HashData->ExtraBuffers + 3);
	CL_SET_ARG(2, 4, sizeof(cl_mem), HashData->ExtraBuffers + 4);
	CL_SET_ARG(2, 5, sizeof(cl_mem), HashData->ExtraBuffers + 5);

	// Finalizer kernels (Blake, Groestl, JH, Skein): states, their own
	// branch nonce buffer, the shared output buffer, and the share target.
	for(int i = 0; i < 4; ++i)
	{
		CL_SET_ARG(i+3, 0, sizeof(cl_mem), HashData->ExtraBuffers + 1);
		CL_SET_ARG(i+3, 1, sizeof(cl_mem), HashData->ExtraBuffers + (i + 2));
		CL_SET_ARG(i+3, 2, sizeof(cl_mem), &HashData->OutputBuffer);
		CL_SET_ARG(i+3, 3, sizeof(cl_ulong), &Target);
	}

	return(ERR_SUCCESS);
}
/*
 * Execute one cryptonight batch on the device: zero the nonce counters, run
 * cn0/cn1/cn2, read back the per-branch nonce counts, run whichever
 * finalizer kernels (Blake/Groestl/JH/Skein) have work, then copy the
 * output buffer (0x100 cl_uints; slot 0xFF holds the found-nonce count)
 * into HashOutput. Advances HashData->Nonce by the batch size.
 * Returns ERR_SUCCESS, ERR_STUPID_PARAMS, or ERR_OCL_API.
 */
int32_t RunXMRTest(AlgoContext *HashData, void *HashOutput)
{
	cl_int retval;
	cl_uint zero = 0;
	size_t GlobalThreads, LocalThreads;
	size_t BranchNonces[4] = {0};

	/* Validate before any dereference: the original read HashData->GlobalSize
	 * ahead of this NULL check. */
	if(!HashData || !HashOutput) return(ERR_STUPID_PARAMS);

	GlobalThreads = HashData->GlobalSize;
	LocalThreads = HashData->WorkSize;

	/* Zero the per-branch nonce counters, stored one past the nonce arrays
	 * (at index GlobalThreads of each branch buffer). */
	for(int i = 2; i < 6; ++i)
	{
		retval = clEnqueueWriteBuffer(*HashData->CommandQueues, HashData->ExtraBuffers[i], CL_FALSE, sizeof(cl_uint) * GlobalThreads, sizeof(cl_uint), &zero, 0, NULL, NULL);
		if(retval != CL_SUCCESS)
		{
			Log(LOG_CRITICAL, "Error %d when calling clEnqueueWriteBuffer to zero branch buffer counter %d.", retval, i - 2);
			return(ERR_OCL_API);
		}
	}

	/* Zero the output counter (slot 0xFF of the output buffer). */
	retval = clEnqueueWriteBuffer(*HashData->CommandQueues, HashData->OutputBuffer, CL_FALSE, sizeof(cl_uint) * 0xFF, sizeof(cl_uint), &zero, 0, NULL, NULL);
	if(retval != CL_SUCCESS)
	{
		/* Message corrected: this is a write zeroing the counter; the
		 * original text claimed it was a result read. */
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueWriteBuffer to zero output buffer counter.", retval);
		return(ERR_OCL_API);
	}

	clFinish(*HashData->CommandQueues);

	size_t Nonce[2] = {HashData->Nonce, 1}, gthreads[2] = { GlobalThreads, 8 }, lthreads[2] = { LocalThreads, 8 };

	/* cn0: state init and scratchpad setup (2-D launch, 8 lanes). */
	retval = clEnqueueNDRangeKernel(*HashData->CommandQueues, HashData->Kernels[0], 2, Nonce, gthreads, lthreads, 0, NULL, NULL);
	if(retval != CL_SUCCESS)
	{
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueNDRangeKernel for kernel %d.", retval, 0);
		return(ERR_OCL_API);
	}

	/* cn1: the memory-hard main loop (1-D launch). */
	retval = clEnqueueNDRangeKernel(*HashData->CommandQueues, HashData->Kernels[1], 1, &HashData->Nonce, &GlobalThreads, &LocalThreads, 0, NULL, NULL);
	if(retval != CL_SUCCESS)
	{
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueNDRangeKernel for kernel %d.", retval, 1);
		return(ERR_OCL_API);
	}

	/* cn2: finalize states and route each hash to one of four branches. */
	retval = clEnqueueNDRangeKernel(*HashData->CommandQueues, HashData->Kernels[2], 2, Nonce, gthreads, lthreads, 0, NULL, NULL);
	if(retval != CL_SUCCESS)
	{
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueNDRangeKernel for kernel %d.", retval, 2);
		return(ERR_OCL_API);
	}

	/* Read back how many nonces each branch kernel must finalize (the
	 * original spelled these four identical calls out longhand). */
	for(int i = 0; i < 4; ++i)
	{
		retval = clEnqueueReadBuffer(*HashData->CommandQueues, HashData->ExtraBuffers[i + 2], CL_FALSE, sizeof(cl_uint) * GlobalThreads, sizeof(cl_uint), BranchNonces + i, 0, NULL, NULL);
		if(retval != CL_SUCCESS)
		{
			Log(LOG_CRITICAL, "Error %d when calling clEnqueueReadBuffer to fetch results.", retval);
			return(ERR_OCL_API);
		}
	}

	clFinish(*HashData->CommandQueues);

	for(int i = 0; i < 4; ++i)
	{
		if(BranchNonces[i])
		{
			/* Pass the true work count to the kernel (argument 4)... */
			retval = clSetKernelArg(HashData->Kernels[i + 3], 4, sizeof(cl_ulong), BranchNonces + i);
			if(retval != CL_SUCCESS)
			{
				Log(LOG_CRITICAL, "Error %d when calling clSetKernelArg for kernel %d, argument %d.", retval, i + 3, 4);
				return(ERR_OCL_API);
			}

			/* ...then round the launch size up to a multiple of the
			 * workgroup size (LocalThreads is a power of two). The original
			 * added the padded count to itself ("+= BranchNonces[i] + pad"),
			 * launching roughly twice the needed work-items. */
			BranchNonces[i] = (BranchNonces[i] + LocalThreads - 1) & ~(LocalThreads - 1);

			retval = clEnqueueNDRangeKernel(*HashData->CommandQueues, HashData->Kernels[i + 3], 1, &HashData->Nonce, BranchNonces + i, &LocalThreads, 0, NULL, NULL);
			if(retval != CL_SUCCESS)
			{
				Log(LOG_CRITICAL, "Error %d when calling clEnqueueNDRangeKernel for kernel %d.", retval, i + 3);
				return(ERR_OCL_API);
			}
		}
	}

	/* Fetch the found-nonce list plus its count (blocking read). */
	retval = clEnqueueReadBuffer(*HashData->CommandQueues, HashData->OutputBuffer, CL_TRUE, 0, sizeof(cl_uint) * 0x100, HashOutput, 0, NULL, NULL);
	if(retval != CL_SUCCESS)
	{
		Log(LOG_CRITICAL, "Error %d when calling clEnqueueReadBuffer to fetch results.", retval);
		return(ERR_OCL_API);
	}

	clFinish(*HashData->CommandQueues);

	/* The next batch starts where this one ended. */
	HashData->Nonce += GlobalThreads;

	return(ERR_SUCCESS);
}
/*
 * Release every OpenCL resource owned by HashData: the seven kernels, the
 * program, the input/extra/output buffers, the command queue, and the
 * host-side helper arrays.
 */
void XMRCleanup(AlgoContext *HashData)
{
	int idx;

	for(idx = 0; idx < 7; ++idx)
		clReleaseKernel(HashData->Kernels[idx]);

	clReleaseProgram(HashData->Program);

	clReleaseMemObject(HashData->InputBuffer);
	for(idx = 0; idx < 6; ++idx)
		clReleaseMemObject(HashData->ExtraBuffers[idx]);
	clReleaseMemObject(HashData->OutputBuffer);

	free(HashData->ExtraBuffers);

	clReleaseCommandQueue(*HashData->CommandQueues);
	free(HashData->CommandQueues);
	free(HashData->GPUIdxs);
}
/*
 * One-time per-device setup for cryptonight hashing: create the command
 * queue, allocate the input/scratchpad/state/branch/output buffers, build
 * cryptonight.cl for the device, and create the seven kernels.
 * Returns ERR_SUCCESS, ERR_STUPID_PARAMS, or ERR_OCL_API.
 *
 * NOTE(review): malloc() results are unchecked, and the error paths leak
 * CommandQueues / ExtraBuffers / Options / KernelSource; Options is never
 * freed even on success — confirm whether cleanup is handled elsewhere.
 */
int32_t SetupXMRTest(AlgoContext *HashData, OCLPlatform *OCL, uint32_t DeviceIdx)
{
size_t len;
cl_int retval;
char *KernelSource, *BuildLog, *Options;
// NOTE(review): OCL is dereferenced here, before the NULL check below.
size_t GlobalThreads = OCL->Devices[DeviceIdx].rawIntensity, LocalThreads = OCL->Devices[DeviceIdx].WorkSize;
#ifdef CL_VERSION_2_0
const cl_queue_properties CommandQueueProperties[] = { 0, 0, 0 };
#else
const cl_command_queue_properties CommandQueueProperties = { 0 };
#endif
// Sanity checks
if(!HashData || !OCL) return(ERR_STUPID_PARAMS);
HashData->GlobalSize = GlobalThreads;
HashData->WorkSize = LocalThreads;
HashData->CommandQueues = (cl_command_queue *)malloc(sizeof(cl_command_queue));
#ifdef CL_VERSION_2_0
*HashData->CommandQueues = clCreateCommandQueueWithProperties(OCL->Context, OCL->Devices[DeviceIdx].DeviceID, CommandQueueProperties, &retval);
#else
*HashData->CommandQueues = clCreateCommandQueue(OCL->Context, OCL->Devices[DeviceIdx].DeviceID, CommandQueueProperties, &retval);
#endif
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateCommandQueueWithProperties.", retval);
return(ERR_OCL_API);
}
// One extra buffer for the scratchpads is required, one for the states, and one for
// each of the four possible branches at the end.
HashData->ExtraBuffers = (cl_mem *)malloc(sizeof(cl_mem) * 6);
// 80-byte input: large enough for the hashing blob uploaded per batch.
HashData->InputBuffer = clCreateBuffer(OCL->Context, CL_MEM_READ_ONLY, 80, NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create input buffer.", retval);
return(ERR_OCL_API);
}
// Scratchpads: (1 << 21) = 2 MiB per hashing thread.
HashData->ExtraBuffers[0] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, (1 << 21) * GlobalThreads, NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create hash scratchpads buffer.", retval);
return(ERR_OCL_API);
}
// States: 200 bytes per thread.
HashData->ExtraBuffers[1] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, 200 * GlobalThreads, NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create hash states buffer.", retval);
return(ERR_OCL_API);
}
// Branch buffers hold GlobalThreads nonces plus trailing counter slots.
// Blake-256 branches
HashData->ExtraBuffers[2] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, sizeof(cl_uint) * (GlobalThreads + 2), NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create Branch 0 buffer.", retval);
return(ERR_OCL_API);
}
// Groestl-256 branches
HashData->ExtraBuffers[3] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, sizeof(cl_uint) * (GlobalThreads + 2), NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create Branch 1 buffer.", retval);
return(ERR_OCL_API);
}
// JH-256 branches
HashData->ExtraBuffers[4] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, sizeof(cl_uint) * (GlobalThreads + 2), NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create Branch 2 buffer.", retval);
return(ERR_OCL_API);
}
// Skein-512 branches
HashData->ExtraBuffers[5] = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, sizeof(cl_uint) * (GlobalThreads + 2), NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create Branch 3 buffer.", retval);
return(ERR_OCL_API);
}
// Assume we may find up to 0xFF nonces in one run - it's reasonable
HashData->OutputBuffer = clCreateBuffer(OCL->Context, CL_MEM_READ_WRITE, sizeof(cl_uint) * 0x100, NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateBuffer to create output buffer.", retval);
return(ERR_OCL_API);
}
// NOTE(review): len (the source length) is not checked for a failed load.
len = LoadTextFile(&KernelSource, "cryptonight.cl");
HashData->Program = clCreateProgramWithSource(OCL->Context, 1, (const char **)&KernelSource, NULL, &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateProgramWithSource on the contents of %s.", retval, "cryptonight.cl");
return(ERR_OCL_API);
}
// Build options: 31 chars + NUL fits the 32-byte buffer.
Options = (char *)malloc(sizeof(char) * 32);
snprintf(Options, 31, "-I. -DWORKSIZE=%d", (uint32_t)LocalThreads);
retval = clBuildProgram(HashData->Program, 1, &OCL->Devices[DeviceIdx].DeviceID, Options, NULL, NULL);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clBuildProgram.", retval);
// On build failure, fetch and log the compiler output before bailing.
retval = clGetProgramBuildInfo(HashData->Program, OCL->Devices[DeviceIdx].DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clGetProgramBuildInfo for length of build log output.", retval);
return(ERR_OCL_API);
}
BuildLog = (char *)malloc(sizeof(char) * (len + 2));
retval = clGetProgramBuildInfo(HashData->Program, OCL->Devices[DeviceIdx].DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clGetProgramBuildInfo for build log.", retval);
return(ERR_OCL_API);
}
Log(LOG_CRITICAL, "Build Log:\n%s", BuildLog);
free(BuildLog);
return(ERR_OCL_API);
}
cl_build_status status;
// Poll until the (possibly asynchronous) build completes; this busy-waits
// with a 1-unit sleep between status queries.
do
{
retval = clGetProgramBuildInfo(HashData->Program, OCL->Devices[DeviceIdx].DeviceID, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &status, NULL);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clGetProgramBuildInfo for status of build.", retval);
return(ERR_OCL_API);
}
sleep(1);
} while(status == CL_BUILD_IN_PROGRESS);
// Fetch the (successful) build log for debugging output.
retval = clGetProgramBuildInfo(HashData->Program, OCL->Devices[DeviceIdx].DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clGetProgramBuildInfo for length of build log output.", retval);
return(ERR_OCL_API);
}
BuildLog = (char *)malloc(sizeof(char) * (len + 2));
retval = clGetProgramBuildInfo(HashData->Program, OCL->Devices[DeviceIdx].DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clGetProgramBuildInfo for build log.", retval);
return(ERR_OCL_API);
}
Log(LOG_DEBUG, "Build Log:\n%s", BuildLog);
free(BuildLog);
free(KernelSource);
// Create the three cryptonight stages plus the four finalizer kernels.
HashData->Kernels = (cl_kernel *)malloc(sizeof(cl_kernel) * 7);
const char *KernelNames[] = { "cn0", "cn1", "cn2", "Blake", "Groestl", "JH", "Skein" };
for(int i = 0; i < 7; ++i)
{
HashData->Kernels[i] = clCreateKernel(HashData->Program, KernelNames[i], &retval);
if(retval != CL_SUCCESS)
{
Log(LOG_CRITICAL, "Error %d when calling clCreateKernel for kernel %s.", retval, KernelNames[i]);
return(ERR_OCL_API);
}
}
HashData->Nonce = 0;
// Hardcode one GPU per thread in this version
HashData->GPUIdxs = (size_t *)malloc(sizeof(size_t));
*HashData->GPUIdxs = DeviceIdx;
return(ERR_SUCCESS);
}
/*
 * Signal every miner thread to restart work by setting its RestartMining
 * flag.
 * NOTE(review): atomic_store() requires an _Atomic object; RestartMining is
 * declared above as plain 'bool *' — confirm the intended type.
 */
static void RestartMiners(PoolInfo *Pool)
{
	/* uint32_t index avoids the signed/unsigned comparison against
	 * Pool->MinerThreadCount that the original 'int i' produced. */
	for(uint32_t i = 0; i < Pool->MinerThreadCount; ++i)
		atomic_store(RestartMining + i, true);
}
// Raw HTTP/JSON-RPC request used to query the daemon's block count.
static const char getblkc[] = "POST /json_rpc HTTP/1.0\r\nContent-Length: 27\r\n\r\n"
"{\"method\": \"getblockcount\"}";
// Presumably the length of a standard wallet address (95 chars) — TODO confirm.
#define WALLETLEN 95
// getblocktemplate request with a hardcoded destination wallet address.
static char getblkt[] = "POST /json_rpc HTTP/1.0\r\nContent-Length: 178\r\n\r\n"
"{\"method\": \"getblocktemplate\", \"params\": {\"reserve_size\": 8, \"wallet_address\": "
"\"9xaXMreKDK7bctpHtTE9zUUTgffkRvwZJ7UvyJGAQHkvBFqUYWwhVWWendW6NAdvtB8nn883WQxtU7cpe5eyJiUxLZ741t5\"}}";
/* Stratum client thread.
 * Logs in to the pool over TLS, then loops forever receiving newline-delimited
 * JSON-RPC messages: the login response (which carries the first job),
 * share-submission responses, and "job" notifications. New work is published
 * to the miner threads through the globals CurrentJob/JobIdx; miners are
 * restarted on every new job since this stratum dialect has no clean_jobs.
 * InfoPtr is a PoolInfo *. Returns NULL on fatal errors.
 * Fixes vs. previous revision:
 *  - json_decref(result) decref'd a reference BORROWED from json_object_get()
 *    (jansson borrows, it does not transfer ownership) while the owned msg
 *    object leaked; we now decref msg instead.
 *  - sleep(1000) contradicted the "Sleeping 10 seconds" log; now sleep(10).
 *  - removed unused locals and unused SSL_do_handshake() result variables. */
void *StratumThreadProc(void *InfoPtr)
{
    JobInfo *NextJob;
    int poolsocket, ret;
    size_t PartialMessageOffset;
    char rawresponse[STRATUM_MAX_MESSAGE_LEN_BYTES];
    PoolInfo *Pool = (PoolInfo *)InfoPtr;
    char s[JSON_BUF_LEN];
    int len;
    SSL_CTX *ctx;
    SSL *ssl;
    poolsocket = Pool->sockfd;
    /* Build the XMR-stratum login request once; it is reused on reconnect. */
    len = snprintf(s, JSON_BUF_LEN, "{\"method\": \"login\", \"params\": "
        "{\"login\": \"%s\", \"pass\": \"%s\", "
        "\"agent\": \"wolf-hyc-xmr-miner/0.1\"}, \"id\": 1}\r\n\n",
        Pool->WorkerData.User, Pool->WorkerData.Pass);
    Log(LOG_NETDEBUG, "Request: %s", s);
    /* Do SSL stuff */
    ctx = InitCTX();
    ssl = SSL_new(ctx);
    SSL_set_cipher_list(ssl, "HIGH:!aNULL:!PSK:!SRP:!MD5:!RC4:!SHA1");
    SSL_set_fd(ssl, Pool->sockfd);
    SSL_set_connect_state(ssl);
    ret = SSL_connect(ssl);
    if (ret != 1) {
        print_ssl_error();
        return(0);
    }
    SSL_do_handshake(ssl); /* result intentionally ignored; connect succeeded */
    Log(LOG_CRITICAL, "Protected with encryption %s", SSL_get_cipher(ssl));
    Pool->ssl = ssl;
    ret = ssl_sendit(Pool->ssl, s, len, Pool->sockfd);
    if (ret == -1) {
        return(NULL);
    }
    PartialMessageOffset = 0;
    NextJob = &Jobs[0];
    // Listen for work until termination.
    for(;;)
    {
        fd_set readfds;
        uint32_t bufidx, MsgLen;
        struct timeval timeout;
        char StratumMsg[STRATUM_MAX_MESSAGE_LEN_BYTES];
        char isOpen[JSON_BUF_LEN] = "isitopen";
        // NOTE(review): timeout/readfds are prepared but no select() is visible
        // here; presumably read_write() blocks internally — confirm before removing.
        timeout.tv_sec = 480;
        timeout.tv_usec = 0;
        FD_ZERO(&readfds);
        FD_SET(Pool->sockfd, &readfds);
        ret = read_write(ssl, Pool->sockfd, rawresponse, &PartialMessageOffset);
        // Negative means the connection is in trouble; probe it and, if dead,
        // tear down the SSL session and reconnect/re-authenticate.
        while (ret < 0) {
            SSL_do_handshake(Pool->ssl);
            int isSocketOpen = ssl_test(Pool->ssl, isOpen, strlen(isOpen), Pool->sockfd);
            if (isSocketOpen < 0) {
retry2:
                Log(LOG_CRITICAL, "Inside retry2");
                Log(LOG_NOTIFY, "Stratum connection to pool timed out.");
                closesocket(poolsocket);
                SSL_free(ssl);
                Pool->ssl = NULL;
retry:
                Log(LOG_CRITICAL, "Inside retry");
                poolsocket = Pool->sockfd = ConnectToPool(Pool->StrippedURL, Pool->Port);
                //SetNonBlockingSocket(Pool->sockfd);
                ssl = SSL_new(ctx);
                SSL_set_cipher_list(ssl, "HIGH:!aNULL:!PSK:!SRP:!MD5:!RC4:!SHA1");
                SSL_set_fd(ssl, Pool->sockfd);
                SSL_connect(ssl);
                Pool->ssl = ssl;
                // TODO/FIXME: This exit is bad and should be replaced with better flow control
                if(Pool->sockfd == INVALID_SOCKET)
                {
                    Log(LOG_ERROR, "Unable to reconnect to pool. Sleeping 10 seconds...\n");
                    sleep(10); // was sleep(1000) — did not match the message above
                    goto retry;
                }
                Log(LOG_NOTIFY, "Reconnected to pool... authenticating...");
reauth:
                RestartMiners(Pool);
                Log(LOG_CRITICAL, "Inside reauth");
                Log(LOG_NETDEBUG, "Request: %s", s);
                ret = ssl_sendit(ssl, s, len, poolsocket);
                if (ret == -1) {
                    print_ssl_error();
                }
                PartialMessageOffset = 0;
                Log(LOG_NOTIFY, "Reconnected to pool.");
                ret = read_write(ssl, Pool->sockfd, rawresponse, &PartialMessageOffset);
            }
        }
        // NOTE(review): assumes ret < STRATUM_MAX_MESSAGE_LEN_BYTES; confirm
        // read_write() guarantees room for this terminator.
        rawresponse[ret] = 0x00;
        bufidx = 0;
        // Process every complete ('\n'-terminated) message in the buffer.
        while(strchr(rawresponse + bufidx, '\n'))
        {
            json_t *msg, *msgid, *method;
            json_error_t err;
            MsgLen = strchr(rawresponse + bufidx, '\n') - (rawresponse + bufidx) + 1;
            memcpy(StratumMsg, rawresponse + bufidx, MsgLen);
            StratumMsg[MsgLen] = 0x00;
            bufidx += MsgLen;
            Log(LOG_NETDEBUG, "Got something: %s", StratumMsg);
            Log(LOG_CRITICAL, "Got something: %s", StratumMsg);
            msg = json_loads(StratumMsg, 0, NULL);
            if(!msg)
            {
                Log(LOG_CRITICAL, "Error parsing JSON from pool server.");
                closesocket(poolsocket);
                return(NULL);
            }
            msgid = json_object_get(msg, "id");
            // If the "id" field exists, it's either the reply to the
            // login, and contains the first job, or is a share
            // submission response, at least in this butchered XMR Stratum
            // The ID is also stupidly hardcoded to 1 in EVERY case.
            // No ID field means new job
            // Also, error responses to shares have no result
            if(msgid && json_integer_value(msgid))
            {
                json_t *result = json_object_get(msg, "result");
                json_t *authid = NULL;
                // Only way to tell the two apart is that the result
                // object on a share submission response has ONLY
                // the status string.
                if(result) authid = json_object_get(result, "id");
                // Must be a share submission response if NULL
                // Otherwise, it's the first job.
                if(!authid)
                {
                    double TotalHashrate = 0;
                    json_t *result = json_object_get(msg, "result");
                    json_t *err = json_object_get(msg, "error");
                    pthread_mutex_lock(&StatusMutex);
                    if(json_is_null(err) && !strcmp(json_string_value(json_object_get(result, "status")), "OK"))
                    {
                        Log(LOG_INFO, "Share accepted: %d/%d (%.02f%%)", GlobalStatus.SolvedWork - GlobalStatus.RejectedWork, GlobalStatus.SolvedWork, (double)(GlobalStatus.SolvedWork - GlobalStatus.RejectedWork) / GlobalStatus.SolvedWork * 100.0);
                    }
                    else
                    {
                        const char *errmsg;
                        GlobalStatus.RejectedWork++;
                        errmsg = json_string_value(json_object_get(err, "message"));
                        Log(LOG_INFO, "Share rejected (%s): %d/%d (%.02f%%)", errmsg, GlobalStatus.SolvedWork - GlobalStatus.RejectedWork, GlobalStatus.SolvedWork, (double)(GlobalStatus.SolvedWork - GlobalStatus.RejectedWork) / GlobalStatus.SolvedWork * 100.0);
                        // Pool dropped our session; log in again.
                        if (!strcasecmp("Unauthenticated", errmsg)) {
                            RestartMiners(Pool);
                            pthread_mutex_unlock(&StatusMutex);
                            goto reauth;
                        }
                    }
                    for(int i = 0; i < Pool->MinerThreadCount; ++i)
                    {
                        TotalHashrate += GlobalStatus.ThreadHashCounts[i] / GlobalStatus.ThreadTimes[i];
                    }
                    Log(LOG_INFO, "Total Hashrate: %.02fH/s\n", TotalHashrate);
                    pthread_mutex_unlock(&StatusMutex);
                }
                else
                {
                    json_t *job, *blob, *jid, *target;
                    // cpuminer has it hardcoded to 64, so hell, no point
                    // in handling arbitrary sizes here
                    strcpy(Pool->XMRAuthID, json_string_value(authid));
                    job = json_object_get(result, "job");
                    if(!job)
                    {
                        Log(LOG_CRITICAL, "Server did not respond to login request with a job.");
                        json_decref(msg);
                        return(NULL);
                    }
                    blob = json_object_get(job, "blob");
                    jid = json_object_get(job, "job_id");
                    target = json_object_get(job, "target");
                    if(!blob || !jid || !target)
                    {
                        Log(LOG_CRITICAL, "Server sent invalid first job.");
                        json_decref(msg);
                        return(NULL);
                    }
                    const char *val = json_string_value(blob);
                    NextJob->XMRBlobLen = strlen(val) / 2;
                    ASCIIHexToBinary(NextJob->XMRBlob, val, NextJob->XMRBlobLen * 2);
                    strcpy(NextJob->ID, json_string_value(jid));
                    NextJob->XMRTarget = BSWAP32(strtoul(json_string_value(target), NULL, 16));
                    CurrentJob = NextJob;
                    JobIdx++;
                    NextJob = &Jobs[JobIdx&1];
                    Log(LOG_NOTIFY, "New job at diff %g", (double)0xffffffff / CurrentJob->XMRTarget);
                    // Expand the 32-bit pool target to the full 64-bit compare value.
                    CurrentJob->XMRTarget <<= 32;
                    CurrentJob->XMRTarget |= 0xffffffff;
                }
                // BUGFIX: was json_decref(result) — json_object_get() returns a
                // BORROWED reference (must not be decref'd) and the owned msg leaked.
                json_decref(msg);
            }
            else
            {
                method = json_object_get(msg, "method");
                if(!method)
                {
                    Log(LOG_CRITICAL, "Server message has no id field and doesn't seem to have a method field...");
                    json_decref(msg);
                    closesocket(poolsocket);
                    return(NULL);
                }
                if(!strcmp("job", json_string_value(method)))
                {
                    json_t *job, *blob, *jid, *target;
                    job = json_object_get(msg, "params");
                    if(!job)
                    {
                        Log(LOG_CRITICAL, "Job notification sent no params.");
                        json_decref(msg);
                        return(NULL);
                    }
                    blob = json_object_get(job, "blob");
                    jid = json_object_get(job, "job_id");
                    target = json_object_get(job, "target");
                    if(!blob || !jid || !target)
                    {
                        Log(LOG_CRITICAL, "Server sent invalid job.");
                        json_decref(msg);
                        return(NULL);
                    }
                    const char *val = json_string_value(blob);
                    NextJob->XMRBlobLen = strlen(val) / 2;
                    ASCIIHexToBinary(NextJob->XMRBlob, val, NextJob->XMRBlobLen * 2);
                    strcpy(NextJob->ID, json_string_value(jid));
                    NextJob->XMRTarget = BSWAP32(strtoul(json_string_value(target), NULL, 16));
                    CurrentJob = NextJob;
                    JobIdx++;
                    NextJob = &Jobs[JobIdx&1];
                    // No cleanjobs param, so we flush every time
                    RestartMiners(Pool);
                    Log(LOG_NOTIFY, "New job at diff %g", (double)0xffffffff / CurrentJob->XMRTarget);
                    CurrentJob->XMRTarget <<= 32;
                    CurrentJob->XMRTarget |= 0xffffffff;
                }
                else
                {
                    Log(LOG_NETDEBUG, "I have no idea what the fuck that message was.");
                }
                json_decref(msg);
            }
        }
        // Keep any trailing partial message for the next read.
        memmove(rawresponse, rawresponse + bufidx, ret - bufidx);
        PartialMessageOffset = ret - bufidx;
    }
}
// AlgoName must not be freed by the thread - cleanup is done by caller.
// RequestedWorksize and RequestedxIntensity should be zero if none was requested
/* Per-miner-thread configuration handed to MinerThreadProc(). */
typedef struct _MinerThreadInfo
{
uint32_t ThreadID;            // zero-based index of this miner thread
uint32_t TotalMinerThreads;   // total thread count; used to slice the nonce space
OCLPlatform *PlatformContext; // NULL means this thread mines on the CPU
AlgoContext AlgoCtx;          // per-thread OpenCL algorithm state (GPU threads only)
} MinerThreadInfo;
// Block header is 2 uint512s, 1024 bits - 128 bytes
void *MinerThreadProc(void *Info)
{
int32_t err;
double CurrentDiff;
int MyJobIdx;
JobInfo *MyJob;
char ThrID[128];
uint32_t TmpWork[32];
uint64_t Target;
uint32_t BlobLen;
MinerThreadInfo *MTInfo = (MinerThreadInfo *)Info;
uint32_t StartNonce = ((0xFFFFFFFFU / MTInfo->TotalMinerThreads) * MTInfo->ThreadID) & 0xFFFFFFFE;
uint32_t MaxNonce = StartNonce + (0xFFFFFFFFU / MTInfo->TotalMinerThreads);
uint32_t Nonce = StartNonce, PrevNonce, platform = 0, device = 1, CurENonce2;
struct cryptonight_ctx *ctx;
uint32_t *nonceptr = (uint32_t *)((char *)TmpWork + 39);
unsigned long hashes_done;
// Generate work for first run.
MyJobIdx = JobIdx;
MyJob = (JobInfo *)CurrentJob;
BlobLen = MyJob->XMRBlobLen;
memcpy(TmpWork, MyJob->XMRBlob, BlobLen);
Target = MyJob->XMRTarget;
if (MTInfo->PlatformContext) {
MTInfo->AlgoCtx.Nonce = StartNonce;
MTInfo->AlgoCtx.InputLen = BlobLen;
err = XMRSetKernelArgs(&MTInfo->AlgoCtx, TmpWork, Target);
if(err) return(NULL);
sprintf(ThrID, "Thread %d, GPU ID %zd, GPU Type: %s",
MTInfo->ThreadID, (uint32_t)*MTInfo->AlgoCtx.GPUIdxs, MTInfo->PlatformContext->Devices[*MTInfo->AlgoCtx.GPUIdxs].DeviceName);
} else {
ctx = cryptonight_ctx();
*nonceptr = StartNonce;
sprintf(ThrID, "Thread %d, (CPU)", MTInfo->ThreadID);
}
while(!ExitFlag)
{
TIME_TYPE begin, end;
atomic_store(RestartMining + MTInfo->ThreadID, false);
// If JobID is not equal to the current job ID, generate new work
// off the new job information.
// If JobID is the same as the current job ID, go hash.
if(MyJobIdx != JobIdx)
{
Log(LOG_DEBUG, "%s: Detected new job, regenerating work.", ThrID);
MyJobIdx = JobIdx;
MyJob = (JobInfo *)CurrentJob;
BlobLen = MyJob->XMRBlobLen;
memcpy(TmpWork, MyJob->XMRBlob, BlobLen);
Target = MyJob->XMRTarget;
if (MTInfo->PlatformContext) {
MTInfo->AlgoCtx.Nonce = StartNonce;
MTInfo->AlgoCtx.InputLen = BlobLen;
err = XMRSetKernelArgs(&MTInfo->AlgoCtx, TmpWork, Target);
if(err) return(NULL);
} else {
*nonceptr = StartNonce;
}
}
else
{
if (!MTInfo->PlatformContext)
++(*nonceptr);
}
PrevNonce = MTInfo->AlgoCtx.Nonce;
begin = MinerGetCurTime();
if (MTInfo->PlatformContext) {
do
{
cl_uint Results[0x100] = { 0 };
err = RunXMRTest(&MTInfo->AlgoCtx, Results);
if(err) return(NULL);
if(atomic_load(RestartMining + MTInfo->ThreadID) || ExitFlag) break;
for(int i = 0; i < Results[0xFF]; ++i)
{
Log(LOG_DEBUG, "%s: SHARE found (nonce 0x%.8X)!", ThrID, Results[i]);
pthread_mutex_lock(&QueueMutex);
Share *NewShare = GetShare();
NewShare->Nonce = Results[i];
NewShare->Gothash = 0;
NewShare->Job = MyJob;
SubmitShare(&CurrentQueue, NewShare);
pthread_cond_signal(&QueueCond);
pthread_mutex_unlock(&QueueMutex);
}
} while(MTInfo->AlgoCtx.Nonce < (PrevNonce + 1024));
} else {
const uint32_t first_nonce = *nonceptr;
uint32_t n = first_nonce - 1;
__declspec(align(64)) uint64_t hash[32/8];
int found = 0;
int variant = ((uint8_t*)TmpWork)[0] >= 7 ? ((uint8_t*)TmpWork)[0] - 6 : 0;
again:
do {
if (ExitFlag) break;
*nonceptr = ++n;
cryptonight_hash_ctx(hash, TmpWork, BlobLen, ctx, variant);
if (hash[3] < Target) {
found = 1;
} else if (atomic_load(RestartMining + MTInfo->ThreadID)) {
found = 2;
}
} while (!found && n < MaxNonce);
hashes_done = n - first_nonce;
if (found == 1) {
Log(LOG_DEBUG, "%s: SHARE found (nonce 0x%.8X)!", ThrID, *nonceptr);
pthread_mutex_lock(&QueueMutex);
Share *NewShare = GetShare();
NewShare->Nonce = *nonceptr;
NewShare->Gothash = 1;
memcpy(NewShare->Blob, hash, 32);
NewShare->Job = MyJob;
SubmitShare(&CurrentQueue, NewShare);
pthread_cond_signal(&QueueCond);
pthread_mutex_unlock(&QueueMutex);
}
}
end = MinerGetCurTime();
double Seconds = SecondsElapsed(begin, end);
pthread_mutex_lock(&StatusMutex);
if (MTInfo->PlatformContext)
hashes_done = MTInfo->AlgoCtx.Nonce - PrevNonce;
GlobalStatus.ThreadHashCounts[MTInfo->ThreadID] = hashes_done;
GlobalStatus.ThreadTimes[MTInfo->ThreadID] = Seconds;
pthread_mutex_unlock(&StatusMutex);
Log(LOG_INFO, "%s: %.02fH/s", ThrID, hashes_done / (Seconds));
}
if (MTInfo->PlatformContext)
XMRCleanup(&MTInfo->AlgoCtx);
return(NULL);
}
#ifdef __linux__
/* SIGINT handler: raises ExitFlag and wakes main() by writing one byte to the
 * exit pipe. Only async-signal-safe calls are allowed here, so the previous
 * Log() call was removed — it also invoked undefined behavior by passing an
 * uninitialized char to a "%s" format specifier. */
void SigHandler(int signal)
{
    char c = 0; /* was uninitialized; value is irrelevant but must be defined */
    (void)signal;
    ExitFlag = true;
    (void)write(ExitPipe[1], &c, 1);
}
#else
/* Windows console control handler: just raise the exit flag. */
BOOL SigHandler(DWORD signal)
{
    ExitFlag = true;
    return(TRUE);
}
#endif
// Signed types indicate there is no default value
// If they are negative, do not set them.
/* Per-GPU settings parsed from one entry of the config's "devices" array. */
typedef struct _DeviceSettings
{
uint32_t Platform;        // OpenCL platform index
uint32_t Index;           // device index within the platform
uint32_t Threads;         // miner threads on this device (default 1)
uint32_t rawIntensity;    // kernel global work amount; must be nonzero
uint32_t Worksize;        // kernel local work size; must be nonzero
int32_t CoreFreq;         // MHz; -1 = leave unchanged
int32_t MemFreq;          // MHz; -1 = leave unchanged
int32_t FanSpeedPercent;  // 0-100; -1 = leave unchanged
int32_t PowerTune;        // 0 = leave unchanged
} DeviceSettings;
// Settings structure for a group of threads mining one algo.
// These threads may be running on diff GPUs, and there may
// be multiple threads per GPU.
typedef struct _AlgoSettings
{
char *AlgoName;              // heap-allocated; freed by FreeSettings()
uint32_t NumGPUs;            // entries in GPUSettings
DeviceSettings *GPUSettings; // heap-allocated array; freed by FreeSettings()
uint32_t TotalThreads;       // sum of GPUSettings[i].Threads
uint32_t PoolCount;          // entries in PoolURLs/Workers
char **PoolURLs;             // heap-allocated strings; freed by FreeSettings()
WorkerInfo *Workers;         // heap-allocated credentials; freed by FreeSettings()
json_t *AlgoSpecificConfig;  // remainder of the algo JSON object (borrowed)
} AlgoSettings;
/* Parse the JSON configuration file into *Settings.
 * Only the first entry of the "Algorithms" array is used. On success the
 * "devices" and "pools" members are removed from the algo object and the
 * remainder is exposed via Settings->AlgoSpecificConfig.
 * Returns 0 on success, -1 on any error.
 * Fix vs. previous revision: the fanspeed range-check error path freed
 * Settings->GPUSettings but was missing its return(-1), so the loop carried
 * on writing through the freed pointer (use-after-free).
 * NOTE(review): error paths leak the parsed Config object (and, in the pool
 * loop, earlier allocations); tolerable because the caller exits on failure. */
int ParseConfigurationFile(char *ConfigFileName, AlgoSettings *Settings)
{
    json_t *Config;
    json_error_t Error;
    Config = json_load_file(ConfigFileName, JSON_REJECT_DUPLICATES, &Error);
    if(!Config)
    {
        Log(LOG_CRITICAL, "Error loading configuration file: %s on line %d.", Error.text, Error.line);
        return(-1);
    }
    json_t *AlgoObjArr = json_object_get(Config, "Algorithms");
    if(!AlgoObjArr)
    {
        Log(LOG_CRITICAL, "No 'Algorithms' array found");
        return(-1);
    }
    if(!json_array_size(AlgoObjArr))
    {
        Log(LOG_CRITICAL, "Algorithms array empty!");
        return(-1);
    }
    json_t *AlgoObj = json_array_get(AlgoObjArr, 0);
    json_t *AlgoName = json_object_get(AlgoObj, "name");
    if(!AlgoName || !json_is_string(AlgoName))
    {
        Log(LOG_CRITICAL, "Algorithm name missing or not a string.");
        return(-1);
    }
    json_t *DevsArr = json_object_get(AlgoObj, "devices");
    if(!DevsArr || !json_array_size(DevsArr))
    {
        Log(LOG_CRITICAL, "No devices specified for algorithm %s.", json_string_value(AlgoName));
        return(-1);
    }
    Settings->NumGPUs = json_array_size(DevsArr);
    Settings->GPUSettings = (DeviceSettings *)malloc(sizeof(DeviceSettings) * Settings->NumGPUs);
    Settings->TotalThreads = 0;
    for(int i = 0; i < Settings->NumGPUs; ++i)
    {
        json_t *DeviceObj = json_array_get(DevsArr, i);
        json_t *num = json_object_get(DeviceObj, "index");
        if(!num || !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Device structure #%d for algo %s has no index.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        Settings->GPUSettings[i].Index = json_integer_value(num);
        num = json_object_get(DeviceObj, "rawintensity");
        if(!num || !json_is_integer(num) || !json_integer_value(num))
        {
            Log(LOG_CRITICAL, "Device structure #%d for algo %s has no rawintensity, or rawintensity is set to zero.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        Settings->GPUSettings[i].rawIntensity = json_integer_value(num);
        num = json_object_get(DeviceObj, "worksize");
        if(!num || !json_is_integer(num) || !json_integer_value(num))
        {
            Log(LOG_CRITICAL, "Device structure #%d for algo %s has no worksize, or worksize is set to zero.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        Settings->GPUSettings[i].Worksize = json_integer_value(num);
        // Optional settings follow; each falls back to a documented default.
        num = json_object_get(DeviceObj, "threads");
        if(num && !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Argument to threads in device structure #%d for algo %s is not an integer.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        if(num) Settings->GPUSettings[i].Threads = json_integer_value(num);
        else Settings->GPUSettings[i].Threads = 1;
        Settings->TotalThreads += Settings->GPUSettings[i].Threads;
        num = json_object_get(DeviceObj, "corefreq");
        if(num && !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Argument to corefreq in device structure #%d for algo %s is not an integer.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        if(num) Settings->GPUSettings[i].CoreFreq = json_integer_value(num);
        else Settings->GPUSettings[i].CoreFreq = -1;
        num = json_object_get(DeviceObj, "memfreq");
        if(num && !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Argument to memfreq in device structure #%d for algo %s is not an integer.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        if(num) Settings->GPUSettings[i].MemFreq = json_integer_value(num);
        else Settings->GPUSettings[i].MemFreq = -1;
        num = json_object_get(DeviceObj, "fanspeed");
        if(num && !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Argument to fanspeed in device structure #%d for algo %s is not an integer.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        if(num && ((json_integer_value(num) > 100) || (json_integer_value(num) < 0)))
        {
            Log(LOG_CRITICAL, "Argument to fanspeed in device structure #%d for algo %s is not a valid percentage (0 - 100).", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            // BUGFIX: this return was missing, so execution continued and
            // wrote through the just-freed GPUSettings pointer.
            return(-1);
        }
        if(num) Settings->GPUSettings[i].FanSpeedPercent = json_integer_value(num);
        else Settings->GPUSettings[i].FanSpeedPercent = -1;
        num = json_object_get(DeviceObj, "powertune");
        if(num && !json_is_integer(num))
        {
            Log(LOG_CRITICAL, "Argument to powertune in device structure #%d for algo %s is not an integer.", i, json_string_value(AlgoName));
            free(Settings->GPUSettings);
            return(-1);
        }
        if(num) Settings->GPUSettings[i].PowerTune = json_integer_value(num);
        else Settings->GPUSettings[i].PowerTune = 0;
    }
    // Remove the devices part from the algo object; it's
    // not part of the algo specific options.
    json_object_del(AlgoObj, "devices");
    json_t *PoolsArr = json_object_get(AlgoObj, "pools");
    if(!PoolsArr || !json_array_size(PoolsArr))
    {
        Log(LOG_CRITICAL, "No pools specified for algorithm %s.", json_string_value(AlgoName));
        return(-1);
    }
    Settings->PoolURLs = (char **)malloc(sizeof(char *) * (json_array_size(PoolsArr) + 1));
    Settings->Workers = (WorkerInfo *)malloc(sizeof(WorkerInfo) * ((json_array_size(PoolsArr) + 1)));
    Settings->PoolCount = json_array_size(PoolsArr);
    for(int i = 0; i < Settings->PoolCount; ++i)
    {
        json_t *PoolObj = json_array_get(PoolsArr, i);
        json_t *PoolURL = json_object_get(PoolObj, "url");
        json_t *PoolUser = json_object_get(PoolObj, "user");
        json_t *PoolPass = json_object_get(PoolObj, "pass");
        if(!PoolURL || !PoolUser || !PoolPass)
        {
            Log(LOG_CRITICAL, "Pool structure %d for algo %s is missing a URL, username, or password.", i, json_string_value(AlgoName));
            return(-1);
        }
        Settings->PoolURLs[i] = (char *)malloc(sizeof(char) * (strlen(json_string_value(PoolURL)) + 1));
        Settings->Workers[i].User = (char *)malloc(sizeof(char) * (strlen(json_string_value(PoolUser)) + 1));
        Settings->Workers[i].Pass = (char *)malloc(sizeof(char) * (strlen(json_string_value(PoolPass)) + 1));
        strcpy(Settings->PoolURLs[i], json_string_value(PoolURL));
        strcpy(Settings->Workers[i].User, json_string_value(PoolUser));
        strcpy(Settings->Workers[i].Pass, json_string_value(PoolPass));
        Settings->Workers[i].NextWorker = NULL;
    }
    // Remove the pools part from the algo object; it's
    // not part of the algo specific options.
    json_object_del(AlgoObj, "pools");
    Settings->AlgoSpecificConfig = AlgoObj;
    Settings->AlgoName = (char *)malloc(sizeof(char) * (strlen(json_string_value(AlgoName)) + 1));
    strcpy(Settings->AlgoName, json_string_value(AlgoName));
    return(0);
}
/* Release everything ParseConfigurationFile() heap-allocated inside
 * *Settings: the algo name, the device array, each pool URL and each pool's
 * credential strings, then the pool arrays themselves. AlgoSpecificConfig is
 * not touched here (it is borrowed from the parsed config). */
void FreeSettings(AlgoSettings *Settings)
{
    uint32_t idx;
    free(Settings->AlgoName);
    free(Settings->GPUSettings);
    for(idx = 0; idx < Settings->PoolCount; ++idx)
    {
        free(Settings->Workers[idx].Pass);
        free(Settings->Workers[idx].User);
        free(Settings->PoolURLs[idx]);
    }
    free(Settings->Workers);
    free(Settings->PoolURLs);
}
// Only doing IPv4 for now.
// We should connect to the pool in the main thread,
// then give the socket to threads that need it, so
// that the connection may be cleanly closed.
// TODO: Get Platform index from somewhere else
// TODO/FIXME: Check functions called for error.
/* Entry point: parse the config, pick the hash implementation, set up GPUs,
 * connect to the pool, launch the stratum/broadcast/miner threads, then block
 * on the exit pipe until a signal requests shutdown.
 * Fix vs. previous revision: the final Log used "%s" with a char argument
 * (undefined behavior); it now uses "%c". */
int main(int argc, char **argv)
{
    PoolInfo Pool = {0};
    AlgoSettings Settings;
    MinerThreadInfo *MThrInfo;
    OCLPlatform PlatformContext;
    int ret, poolsocket, PlatformIdx = 0;
    pthread_t Stratum, ADLThread, BroadcastThread, *MinerWorker;
    unsigned int tmp1, tmp2, tmp3, tmp4; /* used by the (disabled) cpuid probe below */
    int use_aesni = 0;
    int daemon = 0;
    InitLogging(LOG_INFO);
    if(argc != 2)
    {
        Log(LOG_CRITICAL, "Usage: %s <config file>", argv[0]);
        return(0);
    }
    if (THISISATEST > 0) {
        Log(LOG_CRITICAL, "Threads are enabled");
    } else {
        Log(LOG_CRITICAL, "Threads are not enabled");
    }
    if(ParseConfigurationFile(argv[1], &Settings)) return(0);
    // Select the CPU hashing routine. The runtime AES-NI probe is disabled,
    // so non-aarch64 builds currently always take the portable path.
#ifdef __aarch64__
    cryptonight_hash_ctx = cryptonight_hash_aesni;
#else
    /*
    if (__get_cpuid_max(0, &tmp1) >= 1) {
    __get_cpuid(1, &tmp1, &tmp2, &tmp3, &tmp4);
    if (tmp3 & 0x2000000)
    use_aesni = 1;
    }*/
    if (use_aesni)
        cryptonight_hash_ctx = cryptonight_hash_aesni;
    else
        cryptonight_hash_ctx = cryptonight_hash_dumb;
#endif
    MThrInfo = (MinerThreadInfo *)malloc(sizeof(MinerThreadInfo) * Settings.TotalThreads);
    MinerWorker = (pthread_t *)malloc(sizeof(pthread_t) * Settings.TotalThreads);
#ifdef __linux__
    Log(LOG_CRITICAL, "At pipe");
    pipe(ExitPipe);
    struct sigaction ExitHandler;
    memset(&ExitHandler, 0, sizeof(struct sigaction));
    ExitHandler.sa_handler = SigHandler;
    sigaction(SIGINT, &ExitHandler, NULL);
    signal(SIGPIPE, SIG_IGN);
#else
    SetConsoleCtrlHandler((PHANDLER_ROUTINE)SigHandler, TRUE);
#endif
    RestartMining = (bool *)malloc(sizeof(bool) * Settings.TotalThreads);
    // Strip the scheme prefix and port suffix off the first pool URL.
    char *TmpPort;
    uint32_t URLOffset;
    if(strstr(Settings.PoolURLs[0], "stratum+tcp://"))
        URLOffset = strlen("stratum+tcp://");
    else if(strstr(Settings.PoolURLs[0], "daemon+tcp://"))
    {
        URLOffset = strlen("daemon+tcp://");
        daemon = 1;
    }
    else
        URLOffset = 0;
    if(strrchr(Settings.PoolURLs[0] + URLOffset, ':'))
        TmpPort = strrchr(Settings.PoolURLs[0] + URLOffset, ':') + 1;
    else
        TmpPort = "3333";
    char *StrippedPoolURL = (char *)malloc(sizeof(char) * (strlen(Settings.PoolURLs[0]) + 1));
    int URLSize = URLOffset;
    for(; Settings.PoolURLs[0][URLSize] != ':' && Settings.PoolURLs[0][URLSize]; ++URLSize)
        StrippedPoolURL[URLSize - URLOffset] = Settings.PoolURLs[0][URLSize];
    StrippedPoolURL[URLSize - URLOffset] = 0x00;
    Log(LOG_DEBUG, "Parsed pool URL: %s", StrippedPoolURL);
    ret = NetworkingInit();
    if(ret)
    {
        Log(LOG_CRITICAL, "Failed to initialize networking with error code %d.", ret);
        return(0);
    }
    // DO NOT FORGET THIS
    Pool.StrippedURL = strdup(StrippedPoolURL);
    Pool.Port = strdup(TmpPort);
    Pool.WorkerData = Settings.Workers[0];
    Pool.MinerThreadCount = Settings.TotalThreads;
    Pool.MinerThreads = (uint32_t *)malloc(sizeof(uint32_t) * Pool.MinerThreadCount);
    // NOTE(review): indexes GPUSettings by thread index — out of bounds when
    // any device has Threads > 1; confirm intended mapping.
    for(int i = 0; i < Settings.TotalThreads; ++i) Pool.MinerThreads[i] = Settings.GPUSettings[i].Index;
    GlobalStatus.ThreadHashCounts = (double *)malloc(sizeof(double) * Settings.TotalThreads);
    GlobalStatus.ThreadTimes = (double *)malloc(sizeof(double) * Settings.TotalThreads);
    GlobalStatus.RejectedWork = 0;
    GlobalStatus.SolvedWork = 0;
    for(int i = 0; i < Settings.TotalThreads; ++i)
    {
        GlobalStatus.ThreadHashCounts[i] = 0;
        GlobalStatus.ThreadTimes[i] = 0;
    }
    // Initialize ADL and apply settings to card
    /*ADLInit();
    for(int i = 0; i < Settings.NumGPUs; ++i)
    {
    ADLAdapterDynInfo Info;
    ret = ADLGetStateInfo(Settings.GPUSettings[i].Index, &Info);
    if(ret)
    Log(LOG_ERROR, "ADLGetStateInfo() failed for GPU #%d with code %d.", Settings.GPUSettings[i].Index, ret);
    Log(LOG_INFO, "Adapter #%d - Fan Speed: %dRPM; Core Clock: %dMhz; Mem Clock: %dMhz; Core Voltage: %dmV; PowerTune: %d; Temp: %.03fC", Settings.GPUSettings[i].Index, Info.FanSpeedRPM, Info.CoreClock, Info.MemClock, Info.CoreVolts, Info.PowerTune, Info.Temp);
    if(Settings.GPUSettings[i].FanSpeedPercent >= 0)
    {
    ret = ADLSetFanspeed(Settings.GPUSettings[i].Index, Settings.GPUSettings[i].FanSpeedPercent);
    if(ret)
    Log(LOG_ERROR, "ADLSetFanspeed() failed for GPU #%d with code %d.", Settings.GPUSettings[i].Index, ret);
    else
    Log(LOG_INFO, "Setting fan speed for GPU #%d to %d%% succeeded.", Settings.GPUSettings[i].Index, Settings.GPUSettings[i].FanSpeedPercent);
    }
    // If either of these are positive, a call to ADLSetClocks is needed
    if((Settings.GPUSettings[i].CoreFreq >= 0) || (Settings.GPUSettings[i].MemFreq >= 0))
    {
    // If corefreq wasn't set, set memfreq. If memfreq wasn't, vice versa.
    // If both were set, then set both.
    if(Settings.GPUSettings[i].CoreFreq < 0)
    ret = ADLSetClocks(Settings.GPUSettings[i].Index, 0, Settings.GPUSettings[i].MemFreq);
    else if(Settings.GPUSettings[i].MemFreq < 0)
    ret = ADLSetClocks(Settings.GPUSettings[i].Index, Settings.GPUSettings[i].CoreFreq, 0);
    else
    ret = ADLSetClocks(Settings.GPUSettings[i].Index, Settings.GPUSettings[i].CoreFreq, Settings.GPUSettings[i].MemFreq);
    if(ret)
    Log(LOG_ERROR, "ADLSetClocks() failed for GPU #%d with code %d.", Settings.GPUSettings[i].Index, ret);
    else
    Log(LOG_INFO, "Setting clocks on GPU #%d to %d/%d succeeded.", Settings.GPUSettings[i].Index, Settings.GPUSettings[i].CoreFreq, Settings.GPUSettings[i].MemFreq);
    }
    if(Settings.GPUSettings[i].PowerTune)
    {
    ret = ADLSetPowertune(Settings.GPUSettings[i].Index, Settings.GPUSettings[i].PowerTune);
    if(ret < 0) Log(LOG_ERROR, "ADLSetPowertune failed for GPU #%d with code %d.", Settings.GPUSettings[i].Index, ret);
    else Log(LOG_INFO, "Setting powertune on GPU #%d to %d succeeded.", Settings.GPUSettings[i].Index, Settings.GPUSettings[i].PowerTune);
    }
    }
    Log(LOG_INFO, "Sleeping for 10s to allow fan to spin up/down...");
    sleep(10);*/
    for(int i = 0; i < Settings.TotalThreads; ++i) atomic_store(RestartMining + i, false);
    Log(LOG_NOTIFY, "Setting up GPU(s).");
    // Note to self - move this list BS into the InitOpenCLPlatformContext() routine
    uint32_t *GPUIdxList = (uint32_t *)malloc(sizeof(uint32_t) * Settings.NumGPUs);
    uint32_t numGPUs = Settings.NumGPUs;
    // Index -1 marks a CPU-only "device"; exclude those from the OpenCL count.
    for(int i = 0; i < Settings.NumGPUs; ++i) {
        GPUIdxList[i] = Settings.GPUSettings[i].Index;
        if (Settings.GPUSettings[i].Index == -1)
            numGPUs--;
    }
    if (numGPUs) {
        ret = InitOpenCLPlatformContext(&PlatformContext, PlatformIdx, numGPUs, GPUIdxList);
        if(ret) return(0);
    }
    free(GPUIdxList);
    for(int i = 0; i < numGPUs; ++i) PlatformContext.Devices[i].rawIntensity = Settings.GPUSettings[i].rawIntensity;
    // Check for zero was done when parsing config
    for(int i = 0; i < numGPUs; ++i)
    {
        if(Settings.GPUSettings[i].Worksize > PlatformContext.Devices[i].MaximumWorkSize)
        {
            Log(LOG_NOTIFY, "Worksize set for device %d is greater than its maximum; using maximum value of %d.", i, PlatformContext.Devices[i].MaximumWorkSize);
            PlatformContext.Devices[i].WorkSize = PlatformContext.Devices[i].MaximumWorkSize;
        }
        else
        {
            PlatformContext.Devices[i].WorkSize = Settings.GPUSettings[i].Worksize;
        }
    }
    // Assign each miner thread its device context (or NULL for CPU threads).
    for(int ThrIdx = 0, GPUIdx = 0; ThrIdx < Settings.TotalThreads && GPUIdx < Settings.NumGPUs; ThrIdx += Settings.GPUSettings[GPUIdx].Threads, ++GPUIdx)
    {
        for(int x = 0; x < Settings.GPUSettings[GPUIdx].Threads; ++x)
        {
            if (Settings.GPUSettings[GPUIdx].Index != -1) {
                SetupXMRTest(&MThrInfo[ThrIdx + x].AlgoCtx, &PlatformContext, GPUIdx);
                MThrInfo[ThrIdx + x].PlatformContext = &PlatformContext;
            } else {
                MThrInfo[ThrIdx + x].PlatformContext = NULL;
            }
            MThrInfo[ThrIdx + x].ThreadID = ThrIdx + x;
            MThrInfo[ThrIdx + x].TotalMinerThreads = Settings.TotalThreads;
        }
    }
    // TODO: Have ConnectToPool() return a Pool struct
    poolsocket = ConnectToPool(StrippedPoolURL, TmpPort);
    if(poolsocket == INVALID_SOCKET)
    {
        Log(LOG_CRITICAL, "Fatal error connecting to pool.");
        return(0);
    }
    Pool.sockfd = poolsocket;
    Log(LOG_NOTIFY, "Successfully connected to pool's stratum.");
    ret = pthread_create(&Stratum, NULL, StratumThreadProc, (void *)&Pool);
    if(ret)
    {
        printf("Failed to create Stratum thread.\n");
        return(0);
    }
    // Wait until we've gotten work and filled
    // up the job structure before launching the
    // miner worker threads.
    for(;;)
    {
        if(CurrentJob) break;
        sleep(1);
    }
    // Work is ready - time to create the broadcast and miner threads
    Log(LOG_CRITICAL, "Launching poolbroadcastthread");
    pthread_create(&BroadcastThread, NULL, PoolBroadcastThreadProc, (void *)&Pool);
    for(int i = 0; i < Settings.TotalThreads; ++i)
    {
        Log(LOG_CRITICAL, "Created miner thread");
        ret = pthread_create(MinerWorker + i, NULL, MinerThreadProc, MThrInfo + i);
        if(ret)
        {
            printf("Failed to create MinerWorker thread.\n");
            return(0);
        }
    }
    //json_decref(Settings.AlgoSpecificConfig);
    //pthread_create(&ADLThread, NULL, ADLInfoGatherThreadProc, NULL);
    // Block until the signal handler writes its wake-up byte.
    char c;
    read(ExitPipe[0], &c, 1);
    // BUGFIX: was "%s" with a char argument — undefined behavior.
    Log(LOG_CRITICAL, "Read %c", c);
    pthread_join(Stratum, NULL);
    // NOTE(review): cancelling after a successful join is a no-op; harmless.
    pthread_cancel(Stratum);
    pthread_cancel(ADLThread);
#ifndef __ANDROID__
    for(int i = 0; i < Settings.TotalThreads; ++i) pthread_cancel(MinerWorker[i]);
#endif
    if (numGPUs)
        ReleaseOpenCLPlatformContext(&PlatformContext);
    //ADLRelease();
    FreeSettings(&Settings);
    free(RestartMining);
    free(Pool.MinerThreads);
    //pthread_cancel(BroadcastThread);
    Log(LOG_CRITICAL, "Before shutdown");
    closesocket(poolsocket);
    NetworkingShutdown();
    printf("Stratum thread terminated.\n");
    return(0);
}
|
ntw_mathp.c | /**
* @brief
*
* @file ntw_mathp.c
* @author Nikolaos Katomeris, 8551, ngkatomer@auth.gr
* @date 30-09-2018
*/
#include "../include/ntw_mathp.h"
#include <math.h>
/* Scale each of the n entries of vector in place by the constant c. */
void NTWMP_multDV(const uint32_t n, double vector[static n], const double c)
{
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vector[idx] = vector[idx] * c;
	}
}
/* Element-wise in-place sum: vectorA[i] becomes vectorA[i] + vectorB[i]. */
void NTWMP_addDV(const uint32_t n, double vectorA[static n], const double vectorB[static n])
{
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vectorA[idx] = vectorA[idx] + vectorB[idx];
	}
}
/* Element-wise in-place difference: vectorA[i] becomes vectorA[i] - vectorB[i]. */
void NTWMP_subDV(const uint32_t n, double vectorA[static n], const double vectorB[static n])
{
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vectorA[idx] = vectorA[idx] - vectorB[idx];
	}
}
/* Return the dot (inner) product of the two n-element vectors. */
double NTWMP_dotDV(const uint32_t n, const double vectorA[static n], const double vectorB[static n])
{
	double acc = 0;
	#pragma omp parallel for reduction (+:acc)
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		acc += vectorA[idx] * vectorB[idx];
	}
	return acc;
}
/* Return the squared Euclidean magnitude (sum of squares) of vector. */
double NTWMP_sqMagnDV(const uint32_t n, const double vector[static n])
{
	double sumsq = 0;
	#pragma omp parallel for reduction (+:sumsq)
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		const double x = vector[idx];
		sumsq += x * x;
	}
	return sumsq;
}
/* Normalize vector in place to unit Euclidean length. If the squared
 * magnitude is below NTWMP_DOUBLE_PRES (effectively zero), a warning is
 * printed to stderr and the vector is left untouched. */
void NTWMP_normalizeDV(const uint32_t n, double vector[static n])
{
	double sumsq = 0;
	#pragma omp parallel for reduction (+:sumsq)
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		sumsq += vector[idx] * vector[idx];
	}
	if (fabs(sumsq) < NTWMP_DOUBLE_PRES)
	{
		fprintf(stderr, "%s: The vector is zero, didn't normalize.\n", __func__);
		return;
	}
	const double length = sqrt(sumsq);
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vector[idx] /= length;
	}
}
/* Scale vector in place so its elements sum to 1 (L1-style normalization,
 * without absolute values). If the element sum is within NTWMP_DOUBLE_PRES
 * of zero, a warning is printed to stderr and the vector is left untouched. */
void NTWMP_normalizeSumDV(const uint32_t n, double vector[static n])
{
	double total = 0;
	#pragma omp parallel for reduction (+:total)
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		total += vector[idx];
	}
	if (fabs(total) < NTWMP_DOUBLE_PRES)
	{
		fprintf(stderr, "%s: Vector elements add up to zero, didn't normalize.\n", __func__);
		return;
	}
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vector[idx] /= total;
	}
}
/* Copy the n elements of vectorB into vectorA (element-wise assignment). */
void NTWMP_assignDV(const uint32_t n, double vectorA[static n], const double vectorB[static n])
{
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		vectorA[idx] = vectorB[idx];
	}
}
/* Allocate and return a new n-element vector with every entry set to value.
 * The caller owns the returned buffer (free() it). Exits the process on
 * allocation failure. */
double* NTWMP_newUniVectorD(const uint32_t n, const double value)
{
	double* out = malloc(n * sizeof *out);
	if (!out)
	{
		fprintf(stderr, "%s: Error at memory allocation.\n", __func__);
		exit(EXIT_FAILURE);
	}
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		out[idx] = value;
	}
	return out;
}
/* Allocate and return a new n-element copy of vector. The caller owns the
 * returned buffer (free() it). Exits the process on allocation failure. */
double* NTWMP_newCopyDV(const uint32_t n, const double vector[static n])
{
	double* dup = malloc(n * sizeof *dup);
	if (!dup)
	{
		fprintf(stderr, "%s: Error at memory allocation.\n", __func__);
		exit(EXIT_FAILURE);
	}
	#pragma omp parallel for
	for (uint32_t idx = 0; idx < n; ++idx)
	{
		dup[idx] = vector[idx];
	}
	return dup;
}
jacu.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "applu.incl"
//---------------------------------------------------------------------
// compute the upper triangular part of the jacobian matrix
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// jacu: for z-plane k, fill the upper-triangular Jacobian blocks of the
// SSOR iteration: du (the 5x5 block diagonal) and au/bu/cu (the block
// sub-diagonals coupling to i+1, j+1 and k+1 respectively).
//
// Reads file globals: u, rho_i, qs, dt, tx1/tx2, ty1/ty2, tz1/tz2, the
// dx*/dy*/dz* dissipation coefficients and loop bounds ist/iend, jst/jend.
// Writes file globals: du, au, bu, cu for every (j,i) of the subdomain.
// NOTE: runs inside an enclosing OpenMP parallel region ("omp for ...
// nowait" below); each thread owns distinct j iterations, so the writes
// to du/au/bu/cu do not race.  The j and i loops run in DECREASING order,
// consistent with the backward (upper-triangular) sweep.
//---------------------------------------------------------------------
void jacu(int k)
{
//---------------------------------------------------------------------
// local variables
//---------------------------------------------------------------------
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for schedule(static) nowait
for (j = jend - 1; j >= jst; j--) {
for (i = iend - 1; i >= ist; i--) {
//---------------------------------------------------------------------
// form the block diagonal
//---------------------------------------------------------------------
// tmp1..tmp3 are 1/rho and its powers at the current cell
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
du[j][i][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 );
du[j][i][1][0] = 0.0;
du[j][i][2][0] = 0.0;
du[j][i][3][0] = 0.0;
du[j][i][4][0] = 0.0;
du[j][i][0][1] = dt * 2.0
* ( - tx1 * r43 - ty1 - tz1 )
* ( c34 * tmp2 * u[k][j][i][1] );
du[j][i][1][1] = 1.0
+ dt * 2.0 * c34 * tmp1
* ( tx1 * r43 + ty1 + tz1 )
+ dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 );
du[j][i][2][1] = 0.0;
du[j][i][3][1] = 0.0;
du[j][i][4][1] = 0.0;
du[j][i][0][2] = dt * 2.0
* ( - tx1 - ty1 * r43 - tz1 )
* ( c34 * tmp2 * u[k][j][i][2] );
du[j][i][1][2] = 0.0;
du[j][i][2][2] = 1.0
+ dt * 2.0 * c34 * tmp1
* ( tx1 + ty1 * r43 + tz1 )
+ dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 );
du[j][i][3][2] = 0.0;
du[j][i][4][2] = 0.0;
du[j][i][0][3] = dt * 2.0
* ( - tx1 - ty1 - tz1 * r43 )
* ( c34 * tmp2 * u[k][j][i][3] );
du[j][i][1][3] = 0.0;
du[j][i][2][3] = 0.0;
du[j][i][3][3] = 1.0
+ dt * 2.0 * c34 * tmp1
* ( tx1 + ty1 + tz1 * r43 )
+ dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 );
du[j][i][4][3] = 0.0;
du[j][i][0][4] = -dt * 2.0
* ( ( ( tx1 * ( r43*c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( c34 - c1345 ) ) * ( u[k][j][i][1]*u[k][j][i][1] )
+ ( tx1 * ( c34 - c1345 )
+ ty1 * ( r43*c34 - c1345 )
+ tz1 * ( c34 - c1345 ) ) * ( u[k][j][i][2]*u[k][j][i][2] )
+ ( tx1 * ( c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( r43*c34 - c1345 ) ) * (u[k][j][i][3]*u[k][j][i][3])
) * tmp3
+ ( tx1 + ty1 + tz1 ) * c1345 * tmp2 * u[k][j][i][4] );
du[j][i][1][4] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( c34 - c1345 ) ) * tmp2 * u[k][j][i][1];
du[j][i][2][4] = dt * 2.0
* ( tx1 * ( c34 - c1345 )
+ ty1 * ( r43*c34 -c1345 )
+ tz1 * ( c34 - c1345 ) ) * tmp2 * u[k][j][i][2];
du[j][i][3][4] = dt * 2.0
* ( tx1 * ( c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( r43*c34 - c1345 ) ) * tmp2 * u[k][j][i][3];
du[j][i][4][4] = 1.0
+ dt * 2.0 * ( tx1 + ty1 + tz1 ) * c1345 * tmp1
+ dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 );
//---------------------------------------------------------------------
// form the first block sub-diagonal
//---------------------------------------------------------------------
// coupling to neighbor (k, j, i+1) in the x direction
tmp1 = rho_i[k][j][i+1];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
au[j][i][0][0] = - dt * tx1 * dx1;
au[j][i][1][0] = dt * tx2;
au[j][i][2][0] = 0.0;
au[j][i][3][0] = 0.0;
au[j][i][4][0] = 0.0;
au[j][i][0][1] = dt * tx2
* ( - ( u[k][j][i+1][1] * tmp1 ) * ( u[k][j][i+1][1] * tmp1 )
+ C2 * qs[k][j][i+1] * tmp1 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[k][j][i+1][1] );
au[j][i][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[k][j][i+1][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
au[j][i][2][1] = dt * tx2
* ( - C2 * ( u[k][j][i+1][2] * tmp1 ) );
au[j][i][3][1] = dt * tx2
* ( - C2 * ( u[k][j][i+1][3] * tmp1 ) );
au[j][i][4][1] = dt * tx2 * C2 ;
au[j][i][0][2] = dt * tx2
* ( - ( u[k][j][i+1][1] * u[k][j][i+1][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[k][j][i+1][2] );
au[j][i][1][2] = dt * tx2 * ( u[k][j][i+1][2] * tmp1 );
au[j][i][2][2] = dt * tx2 * ( u[k][j][i+1][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
au[j][i][3][2] = 0.0;
au[j][i][4][2] = 0.0;
au[j][i][0][3] = dt * tx2
* ( - ( u[k][j][i+1][1]*u[k][j][i+1][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[k][j][i+1][3] );
au[j][i][1][3] = dt * tx2 * ( u[k][j][i+1][3] * tmp1 );
au[j][i][2][3] = 0.0;
au[j][i][3][3] = dt * tx2 * ( u[k][j][i+1][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
au[j][i][4][3] = 0.0;
au[j][i][0][4] = dt * tx2
* ( ( C2 * 2.0 * qs[k][j][i+1]
- C1 * u[k][j][i+1][4] )
* ( u[k][j][i+1][1] * tmp2 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( u[k][j][i+1][1]*u[k][j][i+1][1] )
- ( c34 - c1345 ) * tmp3 * ( u[k][j][i+1][2]*u[k][j][i+1][2] )
- ( c34 - c1345 ) * tmp3 * ( u[k][j][i+1][3]*u[k][j][i+1][3] )
- c1345 * tmp2 * u[k][j][i+1][4] );
au[j][i][1][4] = dt * tx2
* ( C1 * ( u[k][j][i+1][4] * tmp1 )
- C2
* ( u[k][j][i+1][1]*u[k][j][i+1][1] * tmp2
+ qs[k][j][i+1] * tmp1 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[k][j][i+1][1];
au[j][i][2][4] = dt * tx2
* ( - C2 * ( u[k][j][i+1][2]*u[k][j][i+1][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[k][j][i+1][2];
au[j][i][3][4] = dt * tx2
* ( - C2 * ( u[k][j][i+1][3]*u[k][j][i+1][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[k][j][i+1][3];
au[j][i][4][4] = dt * tx2
* ( C1 * ( u[k][j][i+1][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
//---------------------------------------------------------------------
// form the second block sub-diagonal
//---------------------------------------------------------------------
// coupling to neighbor (k, j+1, i) in the y direction
tmp1 = rho_i[k][j+1][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
bu[j][i][0][0] = - dt * ty1 * dy1;
bu[j][i][1][0] = 0.0;
bu[j][i][2][0] = dt * ty2;
bu[j][i][3][0] = 0.0;
bu[j][i][4][0] = 0.0;
bu[j][i][0][1] = dt * ty2
* ( - ( u[k][j+1][i][1]*u[k][j+1][i][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[k][j+1][i][1] );
bu[j][i][1][1] = dt * ty2 * ( u[k][j+1][i][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
bu[j][i][2][1] = dt * ty2 * ( u[k][j+1][i][1] * tmp1 );
bu[j][i][3][1] = 0.0;
bu[j][i][4][1] = 0.0;
bu[j][i][0][2] = dt * ty2
* ( - ( u[k][j+1][i][2] * tmp1 ) * ( u[k][j+1][i][2] * tmp1 )
+ C2 * ( qs[k][j+1][i] * tmp1 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[k][j+1][i][2] );
bu[j][i][1][2] = dt * ty2
* ( - C2 * ( u[k][j+1][i][1] * tmp1 ) );
bu[j][i][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[k][j+1][i][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
bu[j][i][3][2] = dt * ty2
* ( - C2 * ( u[k][j+1][i][3] * tmp1 ) );
bu[j][i][4][2] = dt * ty2 * C2;
bu[j][i][0][3] = dt * ty2
* ( - ( u[k][j+1][i][2]*u[k][j+1][i][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[k][j+1][i][3] );
bu[j][i][1][3] = 0.0;
bu[j][i][2][3] = dt * ty2 * ( u[k][j+1][i][3] * tmp1 );
bu[j][i][3][3] = dt * ty2 * ( u[k][j+1][i][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
bu[j][i][4][3] = 0.0;
bu[j][i][0][4] = dt * ty2
* ( ( C2 * 2.0 * qs[k][j+1][i]
- C1 * u[k][j+1][i][4] )
* ( u[k][j+1][i][2] * tmp2 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(u[k][j+1][i][1]*u[k][j+1][i][1])
- ( r43*c34 - c1345 )*tmp3*(u[k][j+1][i][2]*u[k][j+1][i][2])
- ( c34 - c1345 )*tmp3*(u[k][j+1][i][3]*u[k][j+1][i][3])
- c1345*tmp2*u[k][j+1][i][4] );
bu[j][i][1][4] = dt * ty2
* ( - C2 * ( u[k][j+1][i][1]*u[k][j+1][i][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[k][j+1][i][1];
bu[j][i][2][4] = dt * ty2
* ( C1 * ( u[k][j+1][i][4] * tmp1 )
- C2
* ( qs[k][j+1][i] * tmp1
+ u[k][j+1][i][2]*u[k][j+1][i][2] * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[k][j+1][i][2];
bu[j][i][3][4] = dt * ty2
* ( - C2 * ( u[k][j+1][i][2]*u[k][j+1][i][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[k][j+1][i][3];
bu[j][i][4][4] = dt * ty2
* ( C1 * ( u[k][j+1][i][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
//---------------------------------------------------------------------
// form the third block sub-diagonal
//---------------------------------------------------------------------
// coupling to neighbor (k+1, j, i) in the z direction
tmp1 = rho_i[k+1][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
cu[j][i][0][0] = - dt * tz1 * dz1;
cu[j][i][1][0] = 0.0;
cu[j][i][2][0] = 0.0;
cu[j][i][3][0] = dt * tz2;
cu[j][i][4][0] = 0.0;
cu[j][i][0][1] = dt * tz2
* ( - ( u[k+1][j][i][1]*u[k+1][j][i][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[k+1][j][i][1] );
cu[j][i][1][1] = dt * tz2 * ( u[k+1][j][i][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2;
cu[j][i][2][1] = 0.0;
cu[j][i][3][1] = dt * tz2 * ( u[k+1][j][i][1] * tmp1 );
cu[j][i][4][1] = 0.0;
cu[j][i][0][2] = dt * tz2
* ( - ( u[k+1][j][i][2]*u[k+1][j][i][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[k+1][j][i][2] );
cu[j][i][1][2] = 0.0;
cu[j][i][2][2] = dt * tz2 * ( u[k+1][j][i][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
cu[j][i][3][2] = dt * tz2 * ( u[k+1][j][i][2] * tmp1 );
cu[j][i][4][2] = 0.0;
cu[j][i][0][3] = dt * tz2
* ( - ( u[k+1][j][i][3] * tmp1 ) * ( u[k+1][j][i][3] * tmp1 )
+ C2 * ( qs[k+1][j][i] * tmp1 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[k+1][j][i][3] );
cu[j][i][1][3] = dt * tz2
* ( - C2 * ( u[k+1][j][i][1] * tmp1 ) );
cu[j][i][2][3] = dt * tz2
* ( - C2 * ( u[k+1][j][i][2] * tmp1 ) );
cu[j][i][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[k+1][j][i][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
cu[j][i][4][3] = dt * tz2 * C2;
cu[j][i][0][4] = dt * tz2
* ( ( C2 * 2.0 * qs[k+1][j][i]
- C1 * u[k+1][j][i][4] )
* ( u[k+1][j][i][3] * tmp2 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[k+1][j][i][1]*u[k+1][j][i][1])
- ( c34 - c1345 ) * tmp3 * (u[k+1][j][i][2]*u[k+1][j][i][2])
- ( r43*c34 - c1345 )* tmp3 * (u[k+1][j][i][3]*u[k+1][j][i][3])
- c1345 * tmp2 * u[k+1][j][i][4] );
cu[j][i][1][4] = dt * tz2
* ( - C2 * ( u[k+1][j][i][1]*u[k+1][j][i][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[k+1][j][i][1];
cu[j][i][2][4] = dt * tz2
* ( - C2 * ( u[k+1][j][i][2]*u[k+1][j][i][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[k+1][j][i][2];
cu[j][i][3][4] = dt * tz2
* ( C1 * ( u[k+1][j][i][4] * tmp1 )
- C2
* ( qs[k+1][j][i] * tmp1
+ u[k+1][j][i][3]*u[k+1][j][i][3] * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[k+1][j][i][3];
cu[j][i][4][4] = dt * tz2
* ( C1 * ( u[k+1][j][i][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
|
GB_binop__islt_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint8)
// A*D function (colscale): GB (_AxD__islt_uint8)
// D*A function (rowscale): GB (_DxB__islt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint8)
// C=scalar+B GB (_bind1st__islt_uint8)
// C=scalar+B' GB (_bind1st_tran__islt_uint8)
// C=A+scalar GB (_bind2nd__islt_uint8)
// C=A'+scalar GB (_bind2nd_tran__islt_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): this definition is excluded by the surrounding "#if 0":
// ISLT is not one of the accumulators supported by the dense ewise3 kernel
// (see the comment above: MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, RDIV).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense.  The entire numeric kernel lives
// in the included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the work has been pre-sliced
// into B_ntasks tasks (B_ek_slicing) and runs on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// The scalar arrives untyped (GB_void*) and is unpacked to uint8_t here;
// the numeric kernel is the included template.
GrB_Info GB (_Cdense_accumb__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // FIX: the duplicated "return (GrB_SUCCESS)" that previously sat inside
    // the block above (making this one unreachable) has been removed; the
    // single exit point now matches the sibling kernel _Cdense_accumB.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// results directly into C->x (C's pattern is built by the caller).
GrB_Info GB (_AxD__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// results directly into C->x.
GrB_Info GB (_DxB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (set union), with optional mask M (possibly
// complemented and/or structural).  alpha/beta are only read when
// is_eWiseUnion is true (GxB_eWiseUnion); for GrB_eWiseAdd they are unused.
GrB_Info GB (_AaddB__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A and B; released by GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the untyped alpha/beta scalars into uint8_t
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (set intersection) where C is
// sparse/hypersparse, with optional complemented/structural mask.
GrB_Info GB (_AemultB_08__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy is only relevant when GB_BINOP_FLIP is nonzero;
// for ISLT it is 0 (see above), so only the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_04__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__islt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry present in B's bitmap Bb.
// Cx and Bx may be aliased; each iteration touches only index p, so the
// parallel loop is race-free.
GrB_Info GB (_bind1st__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    const uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip holes in the bitmap; GBX handles the iso case
        if (GBB (Bb, p))
        {
            const uint8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry present in A's bitmap Ab.
// Cx and Ax may be aliased; iterations are independent.
GrB_Info GB (_bind2nd__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip holes in the bitmap; GBX handles the iso case
        if (GBB (Ab, p))
        {
            const uint8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x < aij) via GB_CAST_OP
// (defined just above) inside the included transpose template.
GrB_Info GB (_bind1st_tran__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code compiled after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij < y) via GB_CAST_OP
// (redefined just above) inside the included transpose template.
GrB_Info GB (_bind2nd_tran__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Utils.h | #pragma once
#include <float.h>
#include <math.h>
#include <omp.h>
#include <string.h>
#include <chrono>
#include <iostream>
#include <vector>
#include <open3d/geometry/PointCloud.h>
#include <Eigen/Core>
namespace primitives_fitting {
namespace utils {
/**
* @brief perfoem normal consistent, here we have assumption that the point clouds are all
* in camera coordinate
*
* @param pc
*/
inline void NormalConsistent(open3d::geometry::PointCloud &pc) {
  // Normalize every normal and orient it toward the sensor.  Assumes the
  // cloud is expressed in the camera frame (sensor at the origin), so a
  // normal with a positive dot product against its point faces away from
  // the sensor and is flipped.
  if (!pc.HasNormals()) {
    std::cout << "(NormalConsistent) The target point cloud has no normals" << std::endl;
    return;
  }
  // BUG FIX: `size` was declared int (narrowing from size_t) and compared
  // against a size_t loop index — a signed/unsigned mismatch.  Use size_t
  // throughout, matching the other loops in this header.
  const size_t size = pc.points_.size();
#pragma omp parallel for
  for (size_t i = 0; i < size; i++) {
    pc.normals_[i].normalize();
    if (pc.points_[i].dot(pc.normals_[i]) > 0) {
      pc.normals_[i] *= -1;
    }
  }
}
/**
* @brief extract data by indices
*
* @param src
* @param index
* @param dst
*/
/**
 * @brief Gather elements of `src` selected by `index` into `dst`
 *        (dst[i] = src[index[i]]).  `dst` is overwritten.
 *
 * @param src    source elements
 * @param index  positions to pick from `src` (must all be in range)
 * @param dst    output, resized to index.size()
 */
template <typename T>
inline void GetVectorByIndex(const std::vector<T> &src, const std::vector<size_t> &index,
                             std::vector<T> &dst) {
  dst.clear();
  dst.reserve(index.size());
  for (const size_t id : index) {
    dst.push_back(src[id]);
  }
}
/**
* @brief Get Eigen matrix from vector
*
* @param src
* @param index
* @param dst
*/
inline void GetMatrixByIndex(const std::vector<Eigen::Vector3d> &src,
const std::vector<size_t> &index, Eigen::Matrix3Xd &dst) {
// Gathers src[index[i]] into column i of a 3 x index.size() matrix.
const size_t num = index.size();
dst.setZero(3, num);
// Empty source: leave dst as the zero matrix sized above.
if (src.size() == 0) {
return;
}
// Each iteration writes a distinct column, so the parallel loop is race-free.
#pragma omp parallel for
for (size_t i = 0; i < num; i++) {
dst.col(i) = src[index[i]];
}
}
/**
* @brief convert degree to radian
*
* @param angle_deg
* @return double
*/
inline double Deg2Rad(const double angle_deg) {
  // Convert degrees to radians.  The divide-then-multiply order mirrors
  // the exact floating-point evaluation of angle_deg / 180 * M_PI.
  const double half_turns = angle_deg / 180;
  return half_turns * M_PI;
}
/**
* @brief convert radian to degree
*
* @param angle_rad
* @return double
*/
inline double Rad2Deg(const double angle_rad) {
  // Convert radians to degrees.  The divide-then-multiply order mirrors
  // the exact floating-point evaluation of angle_rad / M_PI * 180.
  const double half_turns = angle_rad / M_PI;
  return half_turns * 180;
}
/**
* @brief data conversion
*
* @param pc
* @param new_pc
*/
inline void EigenMatrixToVector(const Eigen::Ref<const Eigen::MatrixX3d> &pc,
std::vector<Eigen::Vector3d> &new_pc) {
// Converts an N x 3 matrix into a vector of N 3-D points.
const size_t num = pc.rows();
const size_t data_length = sizeof(double) * 3;  // bytes in one 3-D point
new_pc.resize(num);
// Each iteration writes only new_pc[i], so the parallel loop is race-free.
#pragma omp parallel for
for (size_t i = 0; i < num; i++) {
// pc.row(i) is a 1x3 row expression; binding it to a Vector3d const
// reference materializes a contiguous temporary that memcpy can read.
const Eigen::Vector3d &p = pc.row(i);
memcpy(new_pc[i].data(), p.data(), data_length);
}
}
/**
* @brief data conversion
*
* @param pc
* @param normal
* @param new_pc
*/
inline void EigenMatrixToVector(const Eigen::Ref<const Eigen::MatrixX3d> &pc,
const Eigen::Ref<const Eigen::MatrixX3d> &normal,
std::vector<Eigen::Vector6d> &new_pc) {
// Packs point i (first 3 components) and its normal (last 3 components)
// into a single 6-D vector.  Assumes pc and normal have the same row
// count -- TODO confirm callers guarantee this (no check is performed).
const size_t num = pc.rows();
const size_t data_length = sizeof(double) * 3;  // bytes in one 3-D point
new_pc.resize(num);
// Each iteration writes only new_pc[i], so the parallel loop is race-free.
#pragma omp parallel for
for (size_t i = 0; i < num; i++) {
// Row expressions are materialized into contiguous temporaries (see the
// 3-D overload above) before being copied byte-wise.
const Eigen::Vector3d &p = pc.row(i);
const Eigen::Vector3d &n = normal.row(i);
memcpy(new_pc[i].data(), p.data(), data_length);
memcpy(new_pc[i].data() + 3, n.data(), data_length);
}
}
/**
* @brief data conversion
*
* @param pc
* @param new_pc
*/
inline void VectorToEigenMatrix(const std::vector<Eigen::Vector3d> &pc,
Eigen::Matrix<double, Eigen::Dynamic, 3> &new_pc) {
// Stacks N 3-D points into an N x 3 matrix (one point per row).
const size_t num = pc.size();
new_pc.setZero(num, 3);
// Each iteration writes a distinct row, so the parallel loop is race-free.
#pragma omp parallel for
for (size_t i = 0; i < num; i++) {
new_pc.row(i) = pc[i].transpose();
}
}
/**
* @brief data conversion
*
* @param pc
* @param new_pc
*/
inline void VectorToEigenMatrix(const std::vector<Eigen::Vector6d> &pc,
Eigen::Matrix<double, Eigen::Dynamic, 6> &new_pc) {
// Stacks N 6-D vectors (point + normal) into an N x 6 matrix, one per row.
const size_t num = pc.size();
new_pc.setZero(num, 6);
// Each iteration writes a distinct row, so the parallel loop is race-free.
#pragma omp parallel for
for (size_t i = 0; i < num; i++) {
new_pc.row(i) = pc[i].transpose();
}
}
/**
* @brief Compute the coordinate transformation between the target coordinate
* and origin coordinate
*
* @param x_head Point at target coordinate x-axis
* @param origin Point at target coordinate origin
* @param ref Point at target coordinate x-y plane
* @return Eigen::Matrix4d
*/
template <typename T>
Eigen::Matrix<T, 4, 4> CalcCoordinateTransform(const Eigen::Matrix<T, 3, 1> &x_head,
const Eigen::Matrix<T, 3, 1> &origin,
const Eigen::Matrix<T, 3, 1> &ref) {
// Builds the 4x4 homogeneous transform whose rotation columns are the
// target frame's x/y/z axes (expressed in the source frame) and whose
// translation is `origin`.
// NOTE(review): assumes x_head != origin, ref != origin, and that the
// three points are not collinear; otherwise the norms / cross product
// degenerate (division by zero) -- TODO confirm callers guarantee this.
const Eigen::Matrix<T, 3, 1> x_axis = (x_head - origin) / (x_head - origin).norm();
const Eigen::Matrix<T, 3, 1> tmp_axis = (ref - origin) / (ref - origin).norm();
Eigen::Matrix<T, 3, 1> z_axis = x_axis.cross(tmp_axis);
// Normalize z, folding in a sign flip so the resulting z-axis has a
// non-negative component along world +Z.
if (z_axis.dot(Eigen::Matrix<T, 3, 1>(0, 0, 1)) > 0) {
z_axis /= z_axis.norm();
} else {
z_axis /= -z_axis.norm();
}
// y completes the right-handed frame: y = z x x.
Eigen::Matrix<T, 3, 1> y_axis = z_axis.cross(x_axis);
y_axis /= y_axis.norm();
// Assemble [x | y | z | origin; 0 0 0 1] in row-major comma-init order.
Eigen::Matrix<T, 4, 4> transform;
transform << x_axis(0), y_axis(0), z_axis(0), origin(0), x_axis(1), y_axis(1), z_axis(1),
origin(1), x_axis(2), y_axis(2), z_axis(2), origin(2), 0, 0, 0, 1;
return transform;
}
/**
* @brief compute point to line distance
*
* @tparam T
* @param query
* @param point1
* @param point2
* @return T
*/
/**
 * @brief Distance from `query` to the infinite line through point1 and
 *        point2: |(q-p1) x (q-p2)| (twice the triangle area) divided by
 *        the base length |p2-p1|.
 */
template <typename T>
inline T CalcPoint2LineDistance(const Eigen::Matrix<T, 3, 1> &query,
                                const Eigen::Matrix<T, 3, 1> &point1,
                                const Eigen::Matrix<T, 3, 1> &point2) {
  const Eigen::Matrix<T, 3, 1> to_p1 = query - point1;
  const Eigen::Matrix<T, 3, 1> to_p2 = query - point2;
  const Eigen::Matrix<T, 3, 1> base = point2 - point1;
  return to_p1.cross(to_p2).norm() / base.norm();
}
} // namespace utils
} // namespace primitives_fitting
|
GB_unaryop__ainv_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_int8
// op(A') function: GB_tran__ainv_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = AINV applied to (uint32_t) Ax [p] for p = 0..anz-1.  Per the
// GB_CAST_OP macro above, the int8 value is cast to uint32_t FIRST and the
// negation is then applied to the unsigned value, i.e. it wraps mod 2^32.
GrB_Info GB_unop__ainv_uint32_int8
(
    uint32_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose pattern and the numeric work both live
// in the included GB_unaryop_transpose.c (phase 2 of 2), specialized by the
// GB_* macros defined above.
GrB_Info GB_tran__ainv_uint32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main3.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <complex.h>
#include "ppm.h"
#include "fractal.h"
#define UNKNOWN_MODE 0
#define JULIA_MODE 1
#define MANDELBROT_MODE 2
/*
 * Render a Julia ("J") or generalized Mandelbrot ("M") fractal as a PPM
 * image on stdout. Pixel (row, col) maps to the complex plane point
 * (x_min + col*x_step) + i*(y_max - row*y_step); converged points are white
 * (255), diverged points are shaded by iteration count * color_multiplier.
 *
 * Returns 0 on success, 1-4 for the various usage errors.
 */
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s [J/M] [options]\n", argv[0]);
        return 1;
    }
    int mode = UNKNOWN_MODE;
    if (strcmp(argv[1], "J") == 0) {
        mode = JULIA_MODE;
    } else if (strcmp(argv[1], "M") == 0) {
        mode = MANDELBROT_MODE;
    }
    if (mode == UNKNOWN_MODE) {
        /* fix: diagnostic was missing its trailing newline */
        fprintf(stderr, "Unrecognized mode \"%s\"\n", argv[1]);
        return 2;
    } else if (mode == JULIA_MODE && argc != 13) {
        fprintf(stderr, "Usage: %s J [width] [height] [x_min] [x_max] [y_min] [y_max] [max_iterations] [color_multiplier] [c_re] [c_im] [d]\n", argv[0]);
        return 3;
    } else if (mode == MANDELBROT_MODE && argc != 11) {
        fprintf(stderr, "Usage: %s M [width] [height] [x_min] [x_max] [y_min] [y_max] [max_iterations] [color_multiplier] [d]\n", argv[0]);
        return 4;
    }
    /* NOTE(review): strtol/strtod results are not validated; malformed
     * arguments silently become 0 -- consider errno/endptr checks. */
    int arg = 2;
    const int width = strtol(argv[arg++], NULL, 0);
    const int height = strtol(argv[arg++], NULL, 0);
    const double x_min = strtod(argv[arg++], NULL);
    const double x_max = strtod(argv[arg++], NULL);
    const double y_min = strtod(argv[arg++], NULL);
    const double y_max = strtod(argv[arg++], NULL);
    const int max_iterations = strtol(argv[arg++], NULL, 0);
    const int color_multiplier = strtol(argv[arg++], NULL, 0);
    /* c is only meaningful in Julia mode; 0 otherwise */
    const double c_re = mode == JULIA_MODE ? strtod(argv[arg++], NULL) : 0;
    const double c_im = mode == JULIA_MODE ? strtod(argv[arg++], NULL) : 0;
    const double d = strtod(argv[arg++], NULL);
    const double x_step = (x_max - x_min) / width;
    const double y_step = (y_max - y_min) / height;
    const fractal_config fc = {EPSILON, INFINITY, max_iterations};
    const double complex c = c_re + c_im * I;
    double complex parameters[2] = {c, d};   /* read-only in the loop */
    output_color_header(width, height);
    /*
     * Fix: x0, y0, z0, iterations and color were previously declared at
     * function scope, making them implicitly shared across OpenMP threads --
     * a data race that could corrupt pixel values. Declaring them inside the
     * loop body makes each iteration's copies private.
     *
     * NOTE(review): output_color()/next_color_row() are still called from
     * multiple threads with schedule(dynamic, 1), so row output order is only
     * correct if ppm.h serializes/buffers internally -- verify.
     */
    #pragma omp parallel for schedule(dynamic, 1)
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            const double x0 = x_min + col * x_step;
            const double y0 = y_max - row * y_step;
            const double complex z0 = x0 + y0 * I;
            int iterations = 0;
            if (mode == JULIA_MODE) {
                iterations = julia(complex_polynomial,
                                   z0,
                                   parameters,
                                   &fc);
            } else if (mode == MANDELBROT_MODE) {
                iterations = generalized_mandelbrot(complex_polynomial,
                                                    z0,
                                                    parameters,
                                                    0,
                                                    &fc);
            }
            const int color =
                (iterations == CONVERGE) ? 255 : color_multiplier * iterations;
            output_color(color, color, color);
        }
        next_color_row();
    }
    return 0;
}
|
Graph.h | /*
* Graph.h
*
* Created on: 01.06.2014
* Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard
* (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <functional>
#include <queue>
#include <stack>
#include <stdexcept>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../Globals.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"
#include "../auxiliary/Random.h"
#include "../viz/Point.h"
#include "Coordinates.h"
namespace NetworKit {
/**
* A weighted edge used for the graph constructor with
* initializer list syntax.
*/
struct WeightedEdge {
node u, v;
edgeweight weight;
WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {}
};
inline bool operator<(const WeightedEdge &e1, const WeightedEdge &e2) {
return e1.weight < e2.weight;
}
// An unweighted node pair; with sorted == true the endpoints are stored in
// ascending order, so {a, b} and {b, a} become the same edge.
struct Edge {
  node u, v;
  Edge(node _u, node _v, bool sorted = false)
      : u(sorted ? std::min(_u, _v) : _u),
        v(sorted ? std::max(_u, _v) : _v) {}
};

// Two edges are equal iff both stored endpoints match (order-sensitive).
inline bool operator==(const Edge &lhs, const Edge &rhs) {
  return lhs.u == rhs.u && lhs.v == rhs.v;
}
} // namespace NetworKit
namespace std {
// Hash for NetworKit::Edge: XOR of the per-node hashes. The combination is
// symmetric in u and v, so (u, v) and (v, u) hash identically.
template <> struct hash<NetworKit::Edge> {
  size_t operator()(const NetworKit::Edge &e) const {
    const size_t hu = hash_node(e.u);
    const size_t hv = hash_node(e.v);
    return hu ^ hv;
  }
  hash<NetworKit::node> hash_node;
};
} // namespace std
namespace NetworKit {
// forward declaration to randomization/CurveballImpl.h
namespace CurveballDetails {
class CurveballMaterialization;
}
/**
* @ingroup graph
* A graph (with optional weights) and parallel iterator methods.
*/
class Graph final {
friend class ParallelPartitionCoarsening;
friend class GraphBuilder;
friend class CurveballDetails::CurveballMaterialization;
private:
// graph attributes
count id; //!< unique graph id, starts at 0
std::string name; //!< name of the graph, initially G#ID
// scalars
count n; //!< current number of nodes
count m; //!< current number of edges
count storedNumberOfSelfLoops; //!< current number of self loops, edges which
//!< have the same origin and target
node
z; //!< current upper bound of node ids, z will be the id of the next node
edgeid omega; //!< current upper bound of edge ids, will be the id of the next
//!< edge
count t; //!< current time step
bool weighted; //!< true if the graph is weighted, false otherwise
bool directed; //!< true if the graph is directed, false otherwise
bool edgesIndexed; //!< true if edge ids have been assigned
// per node data
std::vector<bool> exists; //!< exists[v] is true if node v has not been
//!< removed from the graph
Coordinates<float> coordinates; //!< coordinates of nodes (if present)
std::vector<count> inDeg; //!< only used for directed graphs, number of edges
//!< incoming per node
std::vector<count>
outDeg; //!< degree of every node, zero if node was removed. For directed
//!< graphs only outgoing edges count
std::vector<std::vector<node>>
inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes
//!< u that have an edge (u, v)
std::vector<std::vector<node>>
outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in
//!< outEdges[u] and for undirected also u in outEdges[v]
std::vector<std::vector<edgeweight>>
inEdgeWeights; //!< only used for directed graphs, same schema as inEdges
std::vector<std::vector<edgeweight>>
outEdgeWeights; //!< same schema (and same order!) as outEdges
std::vector<std::vector<edgeid>>
inEdgeIds; //!< only used for directed graphs, same schema as inEdges
std::vector<std::vector<edgeid>>
outEdgeIds; //!< same schema (and same order!) as outEdges
/**
* Returns the next unique graph id.
*/
count getNextGraphId();
/**
* Returns the index of node u in the array of incoming edges of node v. (for
* directed graphs inEdges is searched, while for indirected outEdges is
* searched, which gives the same result as indexInOutEdgeArray).
*/
index indexInInEdgeArray(node v, node u) const;
/**
* Returns the index of node v in the array of outgoing edges of node u.
*/
index indexInOutEdgeArray(node u, node v) const;
/**
* Computes the weighted in/out degree of a graph.
*
* @param inDegree whether to compute the in degree or the out degree.
*/
edgeweight computeWeightedDegree(const node &v,
const bool inDegree = false) const;
/**
* Computes the maximum in/out degree of the graph.
*
* @param inDegree wheter to compute the in degree or the out degree.
*/
count computeMaxDegree(const bool inDegree = false) const;
/**
* Computes the maximum in/out weighted degree of the graph
*
* @param inDegree whether to compute the in degree or the out degree
*/
edgeweight computeMaxWeightedDegree(const bool inDegree = false) const;
/**
* Returns the edge weight of the outgoing edge of index i in the outgoing
* edges of node u
* @param u The node
* @param i The index
* @return The weight of the outgoing edge or defaultEdgeWeight if the graph
* is unweighted
*/
template <bool hasWeights>
inline edgeweight getOutEdgeWeight(node u, index i) const;
/**
* Returns the edge weight of the incoming edge of index i in the incoming
* edges of node u
*
* @param u The node
* @param i The index in the incoming edge array
* @return The weight of the incoming edge
*/
template <bool hasWeights>
inline edgeweight getInEdgeWeight(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the outgoing edges of node u
*
* @param u The node
* @param i The index in the outgoing edges
* @return The edge id
*/
template <bool graphHasEdgeIds>
inline edgeid getOutEdgeId(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edges of u
* @return The edge id
*/
template <bool graphHasEdgeIds>
inline edgeid getInEdgeId(node u, index i) const;
/**
* @brief Returns if the edge (u, v) shall be used in the iteration of all
* edgesIndexed
*
* @param u The source node of the edge
* @param v The target node of the edge
* @return If the node shall be used, i.e. if v is not none and in the
* undirected case if u >= v
*/
template <bool graphIsDirected>
inline bool useEdgeInIteration(node u, node v) const;
/**
* @brief Implementation of the for loop for outgoing edges of u
*
* Note: If all (valid) outgoing edges shall be considered, graphIsDirected
* needs to be set to true
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forOutEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for incoming edges of u
*
* For undirected graphs, this is the same as forOutEdgesOfImpl but u and v
* are changed in the handle
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forInEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for all edges, @see forEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forEdgeImpl(L handle) const;
/**
* @brief Parallel implementation of the for loop for all edges, @see
* parallelForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void parallelForEdgesImpl(L handle) const;
/**
* @brief Summation variant of the parallel for loop for all edges, @see
* parallelSumForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline double parallelSumForEdgesImpl(L handle) const;
/*
* In the following definition, Aux::FunctionTraits is used in order to only
* execute lambda functions with the appropriate parameters. The
* decltype-return type is used for determining the return type of the lambda
* (needed for summation) but also determines if the lambda accepts the
* correct number of parameters. Otherwise the return type declaration fails
* and the function is excluded from overload resoluation. Then there are
* multiple possible lambdas with three (third parameter id or weight) and two
* (second parameter can be second node id or edge weight for neighbor
* iterators). This is checked using Aux::FunctionTraits and std::enable_if.
* std::enable_if only defines the type member when the given bool is true,
* this bool comes from std::is_same which compares two types. The function
* traits give either the parameter type or if it is out of bounds they define
* type as void.
*/
/**
* Triggers a static assert error when no other method is chosen. Because of
* the use of "..." as arguments, the priority of this method is lower than
* the priority of the other methods. This method avoids ugly and unreadable
* template substitution error messages from the other declarations.
*/
template <class F, void * = (void *)0>
typename Aux::FunctionTraits<F>::result_type edgeLambda(F &, ...) const {
// the strange condition is used in order to delay the eveluation of the
// static assert to the moment when this function is actually used
static_assert(!std::is_same<F, F>::value,
"Your lambda does not support the required parameters or the "
"parameters have the wrong type.");
return std::declval<typename Aux::FunctionTraits<
F>::result_type>(); // use the correct return type (this won't compile)
}
/**
* Calls the given function f if its fourth argument is of the type edgeid and
* third of type edgeweight Note that the decltype check is not enough as
* edgeweight can be casted to node and we want to assure that .
*/
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 3) &&
std::is_same<edgeweight, typename Aux::FunctionTraits<
F>::template arg<2>::type>::value &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<
3>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const
-> decltype(f(u, v, ew, id)) {
return f(u, v, ew, id);
}
/**
* Calls the given function f if its third argument is of the type edgeid,
* discards the edge weight Note that the decltype check is not enough as
* edgeweight can be casted to node.
*/
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<
2>::type>::value &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::
type>::value /* prevent f(v, weight, eid) */
>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight, edgeid id) const
-> decltype(f(u, v, id)) {
return f(u, v, id);
}
/**
* Calls the given function f if its third argument is of type edgeweight,
* discards the edge id Note that the decltype check is not enough as node can
* be casted to edgeweight.
*/
template <class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeweight,
typename Aux::FunctionTraits<F>::template arg<
2>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid /*id*/) const
-> decltype(f(u, v, ew)) {
return f(u, v, ew);
}
/**
* Calls the given function f if it has only two arguments and the second
* argument is of type node, discards edge weight and id Note that the
* decltype check is not enough as edgeweight can be casted to node.
*/
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<
1>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight /*ew*/, edgeid /*id*/) const
-> decltype(f(u, v)) {
return f(u, v);
}
/**
* Calls the given function f if it has only two arguments and the second
* argument is of type edgeweight, discards the first node and the edge id
* Note that the decltype check is not enough as edgeweight can be casted to
* node.
*/
template <class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<edgeweight,
typename Aux::FunctionTraits<F>::template arg<
1>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid /*id*/) const
-> decltype(f(u, ew)) {
return f(v, ew);
}
/**
* Calls the given function f if it has only one argument, discards the first
* node id, the edge weight and the edge id
*/
template <class F, void * = (void *)0>
auto edgeLambda(F &f, node, node v, edgeweight, edgeid) const
-> decltype(f(v)) {
return f(v);
}
/**
* Calls the given BFS handle with distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {
return f(u, dist);
}
/**
* Calls the given BFS handle without distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count) const -> decltype(f(u)) {
return f(u);
}
public:
/**
* Create a graph of @a n nodes. The graph has assignable edge weights if @a
* weighted is set to <code>true</code>. If @a weighted is set to
* <code>false</code> each edge has edge weight 1.0 and any other weight
* assignment will be ignored.
* @param n Number of nodes.
* @param weighted If set to <code>true</code>, the graph has edge weights.
* @param directed If set to @c true, the graph will be directed.
*/
Graph(count n = 0, bool weighted = false, bool directed = false);
Graph(const Graph &G, bool weighted, bool directed);
/**
* Generate a weighted graph from a list of edges. (Useful for small
* graphs in unit tests that you do not want to read from a file.)
*
* @param[in] edges list of weighted edges
*/
Graph(std::initializer_list<WeightedEdge> edges);
/**
* Create a graph as copy of @a other.
* @param other The graph to copy.
*/
Graph(const Graph &other) = default;
/** Default move constructor */
Graph(Graph &&other) = default;
/** Default destructor */
~Graph() = default;
/** Default move assignment operator */
Graph &operator=(Graph &&other) = default;
/** Default copy assignment operator */
Graph &operator=(const Graph &other) = default;
/** EDGE IDS **/
/**
* Initially assign integer edge identifiers.
*
* @param force Force re-indexing of edges even if they have already been
* indexed
*/
void indexEdges(bool force = false);
/**
* Checks if edges have been indexed
*
* @return bool if edges have been indexed
*/
bool hasEdgeIds() const { return edgesIndexed; }
/**
* Get the id of the given edge.
*/
edgeid edgeId(node u, node v) const;
/**
* Get an upper bound for the edge ids in the graph.
* @return An upper bound for the edge ids.
*/
index upperEdgeIdBound() const { return omega; }
/** GRAPH INFORMATION **/
/**
* Get the ID of this graph. The ID is a unique unsigned integer given to
* every graph on construction.
*/
count getId() const { return id; }
/**
* Return the type of the graph.
* Graph: not weighted, undirected
* WeightedGraph: weighted, undirected
* DirectedGraph: not weighted, directed
* WeightedDirectedGraph: weighted, directed
*/
std::string typ() const;
/**
* Try to save some memory by shrinking internal data structures of the graph.
* Only run this once you finished editing the graph. Otherwise it will cause
* unnecessary reallocation of memory.
*/
void shrinkToFit();
/**
* Compacts the adjacency arrays by re-using no longer neede slots from
* deleted edges.
*/
void compactEdges();
/**
* Sorts the adjacency arrays by node id. While the running time is linear
* this temporarily duplicates the memory.
*/
void sortEdges();
/**
* Set name of graph to @a name.
* @param name The name.
*/
void setName(std::string name) { this->name = name; }
/*
* Returns the name of the graph.
* @return The name of the graph.
*/
std::string getName() const { return name; }
/**
* Returns a string representation of the graph.
* @return A string representation.
*/
std::string toString() const;
/* COPYING */
/*
* Copies all nodes to a new graph
* @return graph with the same nodes.
*/
Graph copyNodes() const;
/* NODE MODIFIERS */
/**
* Add a new node to the graph and return it.
* @return The new node.
*/
node addNode();
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Add a new node to the graph with coordinates @a x and @y and return it.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
node addNode(float x, float y);
/**
* Remove a node @a v and all incident edges from the graph.
*
* Incoming as well as outgoing edges will be removed.
*
* @param u Node.
*/
void removeNode(node v);
/**
* Check if node @a v exists in the graph.
*
* @param v Node.
* @return @c true if @a v exists, @c false otherwise.
*/
bool hasNode(node v) const { return (v < z) && this->exists[v]; }
/**
* Restores a previously deleted node @a v with its previous id in the graph.
*
* @param v Node.
*
*/
void restoreNode(node v);
// SET OPERATIONS
/**
* Appends another graph to this graph as a new subgraph. Performs node
* id remapping.
* @param G [description]
*/
void append(const Graph &G);
/**
* Modifies this graph to be the union of it and another graph.
* Nodes with the same ids are identified with each other.
* @param G [description]
*/
void merge(const Graph &G);
// SUBGRAPHS
Graph subgraphFromNodes(const std::unordered_set<node> &nodes) const;
/** NODE PROPERTIES **/
/**
* Returns the number of outgoing neighbors of @a v.
*
* @param v Node.
* @return The number of outgoing neighbors.
*/
count degree(node v) const { return outDeg[v]; }
/**
* Get the number of incoming neighbors of @a v.
*
* @param v Node.
* @return The number of incoming neighbors.
* @note If the graph is not directed, the outgoing degree is returned.
*/
count degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; }
/**
* Get the number of outgoing neighbors of @a v.
*
* @param v Node.
* @return The number of outgoing neighbors.
*/
count degreeOut(node v) const { return outDeg[v]; }
/**
* Returns the maximum out-degree of the graph.
*
* @return The maximum out-degree of the graph.
*/
count maxDegree() const;
/**
* Returns the maximum in-degree of the graph.
*
* @return The maximum in-degree of the graph.
*/
count maxDegreeIn() const;
/**
* Check whether @a v is isolated, i.e. degree is 0.
* @param v Node.
* @return @c true if the node is isolated (= degree is 0)
*/
bool isIsolated(node v) const {
return outDeg[v] == 0 && (!directed || inDeg[v] == 0);
}
/**
* Returns the weighted degree of @a v.
*
* @param v Node.
* @return Weighted degree of @a v.
* @note For directed graphs this is the sum of weights of all outgoing edges.
* of @a v.
*/
edgeweight weightedDegree(const node &v) const;
/**
* Returns the maximum weighted degree of the graph.
*
* @return Maximum weighted degree of the graph.
* @note For directed graphs this is the sum of weights of all outgoing edges.
*/
edgeweight maxWeightedDegree() const;
/**
* Returns the maximum weighted in degree of the graph.
*
* @return Maximum weighted in degree of the graph.
* @note For directed graphs this is the sum of weights of all in-going edges.
*/
edgeweight maxWeightedDegreeIn() const;
/**
* Returns the weighted in-degree of @a v.
*
* @param v Node.
* @return Weighted in-degree of @a v.
* @note For directed graphs this is the sum of weights of all ingoing edges.
* of @a v.
*/
edgeweight weightedDegreeIn(const node &v) const;
/**
* Returns the volume of the @a v, which is the weighted degree with
* self-loops counted twice.
*
* @param v Node.
* @return The volume of the @a v.
*/
edgeweight volume(node v) const;
/**
* Returns a random node of the graph.
* @return A random node.
*/
node randomNode() const;
/**
* Returns a random neighbor of @a u and @c none if degree is zero.
*
* @param u Node.
* @return A random neighbor of @a u.
*/
node randomNeighbor(node u) const;
/* EDGE MODIFIERS */
/**
* Insert an edge between the nodes @a u and @a v. If the graph is weighted
* you can optionally set a weight for this edge. The default weight is 1.0.
* Note: Multi-edges are not supported and will NOT be handled consistently by
* the graph data structure.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @param weight Optional edge weight.
*/
void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
/**
* Removes the undirected edge {@a u,@a v}.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
*/
void removeEdge(node u, node v);
/**
* Efficiently removes all the edges adjacent to a set of nodes that is not
* connected to the rest of the graph. This is meant to optimize the Kadabra
* algorithm.
* @param nodesInSet vector of nodes that form a connected component that is
* isolated from the rest of the graph.
*/
void removeEdgesFromIsolatedSet(const std::vector<node> &nodesInSet);
/**
* Removes all the edges in the graph.
*/
void removeAllEdges();
/**
* Removes all self-loops in the graph.
*/
void removeSelfLoops();
/**
* Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2,
* @a t2} into {@a s2, @a t1}.
*
* If there are edge weights or edge ids, they are preserved. Note that no
* check is performed if the swap is actually possible, i.e. does not generate
* duplicate edges.
*
* @param s1 The first source
* @param t1 The first target
* @param s2 The second source
* @param t2 The second target
*/
void swapEdge(node s1, node t1, node s2,
node t2);
/**
* Checks if undirected edge {@a u,@a v} exists in the graph.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return <code>true</code> if the edge exists, <code>false</code> otherwise.
*/
bool hasEdge(node u, node v) const;
/**
* Returns a random edge. By default a random node u is chosen and then some
* random neighbor v. So the probability of choosing (u, v) highly depends on
* the degree of u. Setting uniformDistribution to true, will give you a real
* uniform distributed edge, but will be very slow. So only use
* uniformDistribution for single calls outside of any loops.
*/
std::pair<node, node> randomEdge(bool uniformDistribution = false) const;
/**
* Returns a vector with nr random edges. The edges are chosen uniform random.
*/
std::vector<std::pair<node, node>> randomEdges(count nr) const;
/* GLOBAL PROPERTIES */
/**
* Returns <code>true</code> if this graph supports edge weights other
* than 1.0.
* @return <code>true</code> if this graph supports edge weights other
* than 1.0.
*/
bool isWeighted() const { return weighted; }
/**
* Return @c true if this graph supports directed edges.
* @return @c true if this graph supports directed edges.
*/
bool isDirected() const { return directed; }
/**
* Return <code>true</code> if graph contains no nodes.
* @return <code>true</code> if graph contains no nodes.
*/
bool isEmpty() const { return n == 0; }
/**
* Return the number of nodes in the graph.
* @return The number of nodes.
*/
count numberOfNodes() const { return n; }
/**
* Return the number of edges in the graph.
* @return The number of edges.
*/
count numberOfEdges() const { return m; }
/**
* @return a pair (n, m) where n is the number of nodes and m is the number of
* edges
*/
std::pair<count, count> const size() const { return {n, m}; };
/**
* @return the density of the graph
*/
double density() const {
count n = numberOfNodes();
count m = numberOfEdges();
count loops = numberOfSelfLoops();
m -= loops;
double d;
if (isDirected()) {
d = m / (double)(n * (n - 1));
} else {
d = (2 * m) / (double)(n * (n - 1));
}
return d;
}
/**
* Return the number of loops {v,v} in the graph.
* @return The number of loops.
* @note This involves calculation, so store result if needed multiple times.
*/
count numberOfSelfLoops() const;
/**
* Get an upper bound for the node ids in the graph.
* @return An upper bound for the node ids.
*/
index upperNodeIdBound() const { return z; }
/**
* Check for invalid graph states, such as multi-edges.
* @return False if the graph is in invalid state.
*/
bool checkConsistency() const;
/* DYNAMICS */
/**
* Trigger a time step - increments counter.
*/
void timeStep() { t++; }
/**
* Get time step counter.
* @return Time step counter.
*/
count time() { return t; }
/* COORDINATES */
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Sets the coordinate of @a v to @a value.
*
* @param v Node.
* @param value The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
void setCoordinate(node v, Point<float> value) {
coordinates.setCoordinate(v, value);
}
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get the coordinate of @a v.
* @param v Node.
* @return The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
Point<float> &getCoordinate(node v) { return coordinates.getCoordinate(v); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get minimum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for minimum.
* @return The minimum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
float minCoordinate(count dim) { return coordinates.minCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get maximum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for maximum.
* @return The maximum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Initializes the coordinates for the nodes in graph.
* @note This has to be called once and before you set coordinates. Call this
* method again if new nodes have been added.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
void initCoordinates() { coordinates.init(z); }
/* EDGE ATTRIBUTES */
/**
* Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.
* BEWARE: Running time is \Theta(deg(u))!
*
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.
*/
edgeweight weight(node u, node v) const;
/**
* Set the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void setWeight(node u, node v, edgeweight ew);
/**
* Increase the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void increaseWeight(node u, node v, edgeweight ew);
/* SUMS */
/**
* Returns the sum of all edge weights.
* @return The sum of all edge weights.
*/
edgeweight totalEdgeWeight() const;
/* Collections */
/**
* Get list of all nodes.
* @return List of all nodes.
*/
std::vector<node> nodes() const;
/**
* Get list of edges as node pairs.
* @return List of edges as node pairs.
*/
std::vector<std::pair<node, node>> edges() const;
/**
* Get list of neighbors of @a u.
*
* @param u Node.
* @return List of neighbors of @a u.
*/
std::vector<node> neighbors(node u) const;
/**
* Get i-th (outgoing) neighbor of @a u.
* WARNING: This function is deprecated or only temporary.
*
* @param u Node.
* @param i index; should be in [0, degreeOut(u))
* @return @a i -th (outgoing) neighbor of @a u, or @c none if no such
* neighbor exists.
*/
template <bool graphIsDirected> node getIthNeighbor(node u, index i) const {
node v = outEdges[u][i];
if (useEdgeInIteration<graphIsDirected>(u, v))
return v;
else
return none;
}
/* Derivative Graphs */
/**
* Return an undirected version of this graph.
*
* @return undirected graph.
*/
Graph toUndirected() const;
/**
* Return an unweighted version of this graph.
*
* @return unweighted graph.
*/
Graph toUnweighted() const;
/**
* Return the transpose of this graph. The graph must be directed.
*
* @return transpose of the graph.
*/
Graph transpose() const;
/* NODE ITERATORS */
/**
* Iterate over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void forNodes(L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda
* closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void parallelForNodes(L handle) const;
/** Iterate over all nodes of the graph and call @a handle (lambda closure) as
* long as @a condition remains true. This allows for breaking from a node
* loop.
*
* @param condition Returning <code>false</code> breaks the loop.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename C, typename L>
void forNodesWhile(C condition, L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda
* closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void forNodesInRandomOrder(L handle) const;
/**
* Iterate in parallel over all nodes of the graph and call handler (lambda
* closure). Using schedule(guided) to remedy load-imbalances due to e.g.
* unequal degree distribution.
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void balancedParallelForNodes(L handle) const;
/**
* Iterate over all undirected pairs of nodes and call @a handle (lambda
* closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template <typename L> void forNodePairs(L handle) const;
/**
* Iterate over all undirected pairs of nodes in parallel and call @a handle
* (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template <typename L> void parallelForNodePairs(L handle) const;
/* EDGE ITERATORS */
/**
* Iterate over all edges of the const graph and call @a handle (lambda
* closure).
*
 * @param handle Takes parameters <code>(node, node)</code>, <code>(node,
 * node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
 * node, edgeweight, edgeid)</code>.
*/
template <typename L> void forEdges(L handle) const;
/**
* Iterate in parallel over all edges of the const graph and call @a handle
* (lambda closure).
*
 * @param handle Takes parameters <code>(node, node)</code> or <code>(node,
 * node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
 * node, edgeweight, edgeid)</code>.
*/
template <typename L> void parallelForEdges(L handle) const;
/* NEIGHBORHOOD ITERATORS */
/**
 * Iterate over all neighbors of a node and call @a handle (lambda closure).
*
* @param u Node.
* @param handle Takes parameter <code>(node)</code> or <code>(node,
* edgeweight)</code> which is a neighbor of @a u.
* @note For directed graphs only outgoing edges from @a u are considered.
* A node is its own neighbor if there is a self-loop.
*
*/
template <typename L> void forNeighborsOf(node u, L handle) const;
/**
 * Iterate over all incident edges of a node and call @a handle (lambda
* closure).
*
* @param u Node.
* @param handle Takes parameters <code>(node, node)</code>, <code>(node,
* node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
* node, edgeweight, edgeid)</code> where the first node is @a u and the
* second is a neighbor of @a u.
* @note For undirected graphs all edges incident to @a u are also outgoing
* edges.
*/
template <typename L> void forEdgesOf(node u, L handle) const;
/**
 * Iterate over all neighbors of a node and call handler (lambda closure).
* For directed graphs only incoming edges from u are considered.
*/
template <typename L> void forInNeighborsOf(node u, L handle) const;
/**
 * Iterate over all incoming edges of a node and call handler (lambda
* closure).
* @note For undirected graphs all edges incident to u are also incoming
* edges.
*
* Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.
*/
template <typename L> void forInEdgesOf(node u, L handle) const;
/* REDUCTION ITERATORS */
/**
* Iterate in parallel over all nodes and sum (reduce +) the values returned
* by the handler
*/
template <typename L> double parallelSumForNodes(L handle) const;
/**
* Iterate in parallel over all edges and sum (reduce +) the values returned
* by the handler
*/
template <typename L> double parallelSumForEdges(L handle) const;
/* GRAPH SEARCHES */
/**
* Iterate over nodes in breadth-first search order starting from r until
* connected component of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void BFSfrom(node r, L handle) const;
template <typename L>
void BFSfrom(const std::vector<node> &startNodes, L handle) const;
template <typename L> void BFSEdgesFrom(node r, L handle) const;
/**
* Iterate over nodes in depth-first search order starting from r until
* connected component of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void DFSfrom(node r, L handle) const;
template <typename L> void DFSEdgesFrom(node r, L handle) const;
};
/* NODE ITERATORS */
template <typename L> void Graph::forNodes(L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
/**
 * Visit every existing node and invoke @a handle; iterations are distributed
 * over OpenMP threads, so @a handle must be safe to call concurrently.
 */
template <typename L> void Graph::parallelForNodes(L handle) const {
#pragma omp parallel for
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) {
            handle(v);
        }
    }
}
template <typename C, typename L>
void Graph::forNodesWhile(C condition, L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
if (!condition()) {
break;
}
handle(v);
}
}
}
template <typename L> void Graph::forNodesInRandomOrder(L handle) const {
std::vector<node> randVec = nodes();
std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
for (node v : randVec) {
handle(v);
}
}
/**
 * Parallel node iteration with guided scheduling: chunk sizes shrink over
 * time, which balances load when per-node work (e.g. degree) is skewed.
 * @a handle must be thread-safe.
 */
template <typename L> void Graph::balancedParallelForNodes(L handle) const {
#pragma omp parallel for schedule( \
    guided) // TODO: define min block size (and test it!)
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) {
            handle(v);
        }
    }
}
// Enumerate each unordered pair {u, v} of existing nodes exactly once
// (inner loop starts at u + 1, so always v > u).
template <typename L> void Graph::forNodePairs(L handle) const {
    for (node u = 0; u < z; ++u) {
        if (exists[u]) {
            for (node v = u + 1; v < z; ++v) {
                if (exists[v]) {
                    handle(u, v);
                }
            }
        }
    }
}
// Parallel version of forNodePairs. The outer loop is split across OpenMP
// threads; guided scheduling compensates for later u values having fewer
// partners. @a handle must be thread-safe.
template <typename L> void Graph::parallelForNodePairs(L handle) const {
#pragma omp parallel for schedule(guided)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        if (exists[u]) {
            for (node v = u + 1; v < z; ++v) {
                if (exists[v]) {
                    handle(u, v);
                }
            }
        }
    }
}
/* EDGE ITERATORS */
/* HELPERS */
// The accessors below resolve the graph's runtime properties (weighted,
// edge ids, directedness) at compile time: the primary template handles the
// "true" case, and a full specialization on <false> returns a constant, so
// the edge-iteration loops pay no per-edge branch for these flags.
template <bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
    return outEdgeWeights[u][i];
}
template <> // implementation for weighted == false
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
template <bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
    return inEdgeWeights[u][i];
}
template <> // implementation for weighted == false
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
template <bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
    return outEdgeIds[u][i];
}
template <> // implementation for hasEdgeIds == false
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
    return 0;
}
template <bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
    return inEdgeIds[u][i];
}
template <> // implementation for hasEdgeIds == false
inline edgeid Graph::getInEdgeId<false>(node, index) const {
    return 0;
}
// Filter deciding whether an adjacency entry is reported by the iterators:
// directed graphs only skip deleted-edge slots (v == none); undirected
// graphs additionally report each edge {u, v} from one endpoint only
// (u >= v), so it is not visited twice.
template <bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node /* u */, node v) const {
    return v != none;
}
template <> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
    return u >= v;
}
// Core per-node edge loop: walks u's out-adjacency and forwards each edge
// that passes useEdgeInIteration to the handler via edgeLambda, which adapts
// to whichever (node[, node[, weight[, id]]]) signature the lambda takes.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
    for (index i = 0; i < outEdges[u].size(); ++i) {
        node v = outEdges[u][i];
        if (useEdgeInIteration<graphIsDirected>(u, v)) {
            edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                          getOutEdgeId<graphHasEdgeIds>(u, i));
        }
    }
}
// Incoming-edge loop. For an undirected graph the in-adjacency is the same
// as the out-adjacency, so the else-branch reuses outEdges; in both branches
// the <true> filter only skips deleted slots (v == none).
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
    if (graphIsDirected) {
        for (index i = 0; i < inEdges[u].size(); i++) {
            node v = inEdges[u][i];
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i),
                              getInEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    } else {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                              getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
}
// Whole-graph edge loop: per-node out-edge loop over all node ids.
// (Deleted nodes have empty adjacency vectors, so no exists[] check needed.)
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forEdgeImpl(L handle) const {
    for (node u = 0; u < z; ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u,
                                                                           handle);
    }
}
// Parallel whole-graph edge loop; guided schedule balances skewed degrees.
// The handler must be thread-safe.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
#pragma omp parallel for schedule(guided)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u,
                                                                           handle);
    }
}
// Parallel sum of handler values over all edges via an OpenMP + reduction.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            // undirected, do not iterate over edges twice
            // {u, v} instead of (u, v); if v == none, u > v is not fulfilled
            if (useEdgeInIteration<graphIsDirected>(u, v)) {
                sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                                     getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
    return sum;
}
// Runtime-to-compile-time dispatch: the three boolean properties are packed
// into one index (bit 0 = weighted, bit 1 = directed, bit 2 = edgesIndexed)
// selecting the matching fully specialized implementation.
template <typename L> void Graph::forEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edgeIds
        forEdgeImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edgeIds
        forEdgeImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edgeIds
        forEdgeImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edgeIds
        forEdgeImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edgeIds
        forEdgeImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edgeIds
        forEdgeImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edgeIds
        forEdgeImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edgeIds
        forEdgeImpl<true, true, true, L>(handle);
        break;
    }
}
// Same dispatch as forEdges, selecting the parallel implementation.
// The handler must be thread-safe.
template <typename L> void Graph::parallelForEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edgeIds
        parallelForEdgesImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edgeIds
        parallelForEdgesImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edgeIds
        parallelForEdgesImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edgeIds
        parallelForEdgesImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edgeIds
        parallelForEdgesImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edgeIds
        parallelForEdgesImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edgeIds
        parallelForEdgesImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edgeIds
        parallelForEdgesImpl<true, true, true, L>(handle);
        break;
    }
}
/* NEIGHBORHOOD ITERATORS */
// Neighbor iteration is edge iteration with the handler adapting (via
// edgeLambda in the impl) to a (node)-only signature.
template <typename L> void Graph::forNeighborsOf(node u, L handle) const {
    forEdgesOf(u, handle);
}
// Out-edges of a single node. The first template argument is always true
// here ("directed" filter): for a per-node loop each stored out-entry must
// be reported, so only deleted slots (v == none) are skipped.
template <typename L> void Graph::forEdgesOf(node u, L handle) const {
    switch (weighted + 2 * edgesIndexed) {
    case 0: // not weighted, no edge ids
        forOutEdgesOfImpl<true, false, false, L>(u, handle);
        break;
    case 1: // weighted, no edge ids
        forOutEdgesOfImpl<true, true, false, L>(u, handle);
        break;
    case 2: // not weighted, with edge ids
        forOutEdgesOfImpl<true, false, true, L>(u, handle);
        break;
    case 3: // weighted, with edge ids
        forOutEdgesOfImpl<true, true, true, L>(u, handle);
        break;
    }
}
template <typename L> void Graph::forInNeighborsOf(node u, L handle) const {
    forInEdgesOf(u, handle);
}
// In-edges of a single node; same 3-bit property dispatch as forEdges.
template <typename L> void Graph::forInEdgesOf(node u, L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edge ids
        forInEdgesOfImpl<false, false, false, L>(u, handle);
        break;
    case 1: // weighted, undirected, no edge ids
        forInEdgesOfImpl<false, true, false, L>(u, handle);
        break;
    case 2: // unweighted, directed, no edge ids
        forInEdgesOfImpl<true, false, false, L>(u, handle);
        break;
    case 3: // weighted, directed, no edge ids
        forInEdgesOfImpl<true, true, false, L>(u, handle);
        break;
    case 4: // unweighted, undirected, with edge ids
        forInEdgesOfImpl<false, false, true, L>(u, handle);
        break;
    case 5: // weighted, undirected, with edge ids
        forInEdgesOfImpl<false, true, true, L>(u, handle);
        break;
    case 6: // unweighted, directed, with edge ids
        forInEdgesOfImpl<true, false, true, L>(u, handle);
        break;
    case 7: // weighted, directed, with edge ids
        forInEdgesOfImpl<true, true, true, L>(u, handle);
        break;
    }
}
/* REDUCTION ITERATORS */
// Sum handle(v) over all existing nodes in parallel (OpenMP + reduction).
// The handler must be thread-safe and return a value convertible to double.
template <typename L> double Graph::parallelSumForNodes(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) {
            sum += handle(v);
        }
    }
    return sum;
}
// Sum handler values over all edges; same 3-bit property dispatch as
// forEdges, delegating to the parallel reduction implementation.
template <typename L> double Graph::parallelSumForEdges(L handle) const {
    double sum = 0.0;
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edge ids
        sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edge ids
        sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edge ids
        sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edge ids
        sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edge ids
        sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edge ids
        sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edge ids
        sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edge ids
        sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
        break;
    }
    return sum;
}
/* GRAPH SEARCHES */
/**
 * Single-source BFS: delegates to the multi-source overload with a
 * one-element start set {r}.
 */
template <typename L> void Graph::BFSfrom(node r, L handle) const {
    BFSfrom(std::vector<node>{r}, handle);
}
template <typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q, qNext;
count dist = 0;
// enqueue start nodes
for (node u : startNodes) {
q.push(u);
marked[u] = true;
}
do {
node u = q.front();
q.pop();
// apply function
callBFSHandle(handle, u, dist);
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
qNext.push(v);
marked[v] = true;
}
});
if (q.empty() && !qNext.empty()) {
q.swap(qNext);
++dist;
}
} while (!q.empty());
}
/**
 * BFS from @a r reporting tree edges: handle(u, v, w, eid) is called once
 * for each edge that first discovers v. The do/while is safe here because
 * the root is pushed before the loop, so the queue starts non-empty.
 * Assumes r < z -- TODO(review): confirm callers pass a valid node id.
 */
template <typename L> void Graph::BFSEdgesFrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::queue<node> q;
    q.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = q.front();
        q.pop();
        // apply function
        forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
            if (!marked[v]) {
                handle(u, v, w, eid);
                q.push(v);
                marked[v] = true;
            }
        });
    } while (!q.empty());
}
/**
 * Iterative depth-first search from @a r over r's connected component,
 * invoking @a handle on each node when it is popped. Nodes are marked when
 * pushed, so each is visited exactly once.
 */
template <typename L> void Graph::DFSfrom(node r, L handle) const {
    std::vector<bool> visited(z);
    std::stack<node> todo;
    todo.push(r); // seed the stack with the root
    visited[r] = true;
    while (!todo.empty()) {
        const node u = todo.top();
        todo.pop();
        handle(u);
        forNeighborsOf(u, [&](node v) {
            if (!visited[v]) {
                todo.push(v);
                visited[v] = true;
            }
        });
    }
}
/**
 * DFS from @a r reporting tree edges: handle(u, v) is called once for each
 * edge that first discovers v. The do/while is safe because the root is
 * pushed before the loop. Assumes r < z -- TODO(review): confirm callers
 * pass a valid node id.
 */
template <typename L> void Graph::DFSEdgesFrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::stack<node> s;
    s.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = s.top();
        s.pop();
        // apply function
        forNeighborsOf(u, [&](node v) {
            if (!marked[v]) {
                handle(u, v);
                s.push(v);
                marked[v] = true;
            }
        });
    } while (!s.empty());
}
} /* namespace NetworKit */
#endif /* GRAPH_H_ */
|
test_fully_choose.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
/*
 * Sequential baseline: scan matrix[start..rows-1][start..columns-2] for the
 * largest element. The last column (the free/constant term of the equation
 * system) is excluded from the search. The result is deliberately discarded;
 * the routine exists only to be timed against the parallel variants.
 */
void max_cell(double **matrix, int start, int rows, int columns) {
    double best = matrix[start][start];
    int best_i = start;
    int best_j = start;
    for (int i = start; i < rows; i++) {
        for (int j = start; j < columns - 1; j++) {
            if (matrix[i][j] > best) {
                best_i = i;
                best_j = j;
                best = matrix[i][j];
            }
        }
    }
    (void)best_i;
    (void)best_j;
}
/*
 * Parallel max search using an OpenMP "max" reduction over
 * matrix[start..rows-1][start..columns-2]; the last column (free term) is
 * excluded. Only the value is reduced: OpenMP's reduction clause cannot
 * carry the argmax indices along with the value, which is why the index
 * updates below are commented out (max_i/max_j keep their initial values).
 * The result is discarded; the routine exists only for timing comparisons.
 */
void max_cell_reduction(double **matrix, int start, int rows, int columns) {
    double max_v = matrix[start][start];
    int max_i = start, max_j = start;
#pragma omp parallel for schedule(guided) collapse(2) reduction(max: max_v)
    for (int i = start; i < rows; i++) {
        for (int j = start; j < columns - 1; j++) { // the free term (last column) is not considered
            if (max_v < matrix[i][j]) {
                //max_i = i;
                //max_j = j;
                max_v = matrix[i][j];
            }
        }
    }
    //printf("Redukcja:\nmax_v: %f\nmax_i: %d\nmax_j: %d\n", max_v, max_i, max_j);
}
/*
 * Full-pivoting step of Gaussian elimination: find the largest element (the
 * free-term column is excluded) in the trailing sub_matrix_size block of the
 * matrix, swap its row to the top of that block, and record the column swap
 * in the permutation vector pv.
 *
 * matrix          rows x columns augmented matrix (last column = free term)
 * sub_matrix_size size of the trailing block to search
 * pv              column permutation vector; entries start..columns-2 used
 *
 * Fixes vs. original:
 *  - matrix[start][start] was read BEFORE the `start >= rows` guard, an
 *    out-of-bounds read whenever sub_matrix_size <= 0; the guard now comes
 *    first and also rejects a negative start (sub_matrix_size > rows).
 *  - the per-thread maximum was pre-checked against max_v outside any
 *    synchronization (a data race under OpenMP); the whole compare-and-
 *    update now sits inside the critical section.
 */
void full_choose(double **matrix, int rows, int columns, int sub_matrix_size, int *pv) {
    int start = rows - sub_matrix_size;
    if (start < 0 || start >= rows)
        return;
    int max_i = start, max_j = start;
    double max_v = matrix[start][start];
#pragma omp parallel default(none) shared(matrix, rows, columns, pv, max_v, max_i, max_j, start)
    {
        /* per-thread running maximum and its position */
        int priv_max_i = start, priv_max_j = start;
        double priv_max_v = matrix[start][start];
#pragma omp for schedule(static) collapse(2)
        for (int i = start; i < rows; i++) {
            for (int j = start; j < columns - 1; j++) { /* free term excluded */
                if (priv_max_v < matrix[i][j]) {
                    priv_max_i = i;
                    priv_max_j = j;
                    priv_max_v = matrix[i][j];
                }
            }
        }
        /* merge per-thread maxima under mutual exclusion */
#pragma omp critical
        {
            if (priv_max_v > max_v) {
                max_v = priv_max_v;
                max_i = priv_max_i;
                max_j = priv_max_j;
            }
        }
    }
    if (matrix[start][start] < matrix[max_i][max_j]) {
        /* move the row containing the maximum to the top of the block */
        if (matrix[start] != matrix[max_i]) {
            double *tmp = matrix[start];
            matrix[start] = matrix[max_i];
            matrix[max_i] = tmp;
        }
        /* record the column swap in the permutation vector */
        if (start != max_j) {
            int tmp = pv[start];
            pv[start] = pv[max_j];
            pv[max_j] = tmp;
        }
    }
}
/*
 * Benchmark driver: fills a rows x columns random system, then times the
 * three max-search variants (explicit OpenMP merge, OpenMP reduction,
 * sequential) on it.
 *
 * Fixes vs. original:
 *  - pv was allocated but never initialized, so full_choose swapped
 *    indeterminate values; it now starts as the identity permutation.
 *  - `sizeof(equations)` (sizeof(double **)) replaced by the idiomatic
 *    `sizeof *equations` (sizeof(double *)) -- same size on common ABIs,
 *    but only by accident.
 *  - every allocation is checked; casts on malloc removed.
 */
int main(int argc, char **argv) {
    double **equations = NULL;
    int i, j, rows = 10000, columns = 10001;
    clock_t t;
    (void)argc;
    (void)argv;
    int *pv = malloc(sizeof *pv * columns);
    if (pv == NULL) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }
    /* identity permutation: column k starts in position k */
    for (j = 0; j < columns; j++)
        pv[j] = j;
    srand(time(NULL));
    equations = malloc(sizeof *equations * rows);
    if (equations == NULL) {
        fprintf(stderr, "out of memory\n");
        free(pv);
        return EXIT_FAILURE;
    }
    for (i = 0; i < rows; i++) {
        equations[i] = malloc(sizeof(double) * columns);
        if (equations[i] == NULL) {
            fprintf(stderr, "out of memory\n");
            while (i-- > 0)
                free(equations[i]);
            free(equations);
            free(pv);
            return EXIT_FAILURE;
        }
    }
    for (i = 0; i < rows; i++)
        for (j = 0; j < columns; j++)
            equations[i][j] = (double)rand() / RAND_MAX;
    omp_set_dynamic(1);
    /* NOTE(review): clock() sums CPU time across threads, so it cannot show
     * wall-clock speedup of the parallel variants; omp_get_wtime() would. */
    t = clock();
    full_choose(equations, rows, columns, rows, pv);
    t = clock() - t;
    printf("Czas przetworzenia z OpenMP: %f\n", ((double)t) / CLOCKS_PER_SEC);
    t = clock();
    max_cell_reduction(equations, 0, rows, columns);
    t = clock() - t;
    printf("Czas przetworzenia Redukcja: %f\n", ((double)t) / CLOCKS_PER_SEC);
    t = clock();
    max_cell(equations, 0, rows, columns);
    t = clock() - t;
    printf("Czas przetworzenia SEKWENCYJNIE: %f\n", ((double)t) / CLOCKS_PER_SEC);
    for (i = 0; i < rows; i++)
        free(equations[i]);
    free(equations);
    free(pv);
    return EXIT_SUCCESS;
}
DRB056-jacobi2d-tile-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction. with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"
/* Array initialization. */
/* Array initialization. Generated tiled code (16x16 tiles): fills
 * A[i][j] = (i*(j+2)+2)/n and B[i][j] = (i*(j+3)+3)/n for 0 <= i,j < n,
 * with the tile loops parallelized over OpenMP threads and the innermost
 * loop vectorized. Note the generated code writes A[c4][c3], i.e. c4 is the
 * row index and c3 the column index. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
    //int i;
    //int j;
    {
        int c1;
        int c2;
        int c4;
        int c3;
        if (n >= 1) {
            /* c1/c2 enumerate 16x16 tiles; the ceil/floor conditionals are the
             * generator's sign-safe integer division for the tile count */
#pragma omp parallel for private(c3, c4, c2)
            for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
                for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
                    for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {
#pragma omp simd
                        for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {
                            A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;
                            B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;
                        }
                    }
                }
            }
        }
    }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the n x n live-out array to stderr (DCE prevention; also usable to
 * check output correctness). A newline is emitted whenever the flat index
 * i*n+j is a multiple of 20. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            fprintf(stderr,"%0.2lf ",A[i][j]);
            if ((i * n + j) % 20 == 0) {
                fprintf(stderr,"\n");
            }
        }
    }
    fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Jacobi 2D stencil kernel with array copy-back, time-tiled/skewed by a
 * polyhedral compiler (PLUTO-style output). Semantically equivalent to
 * tsteps iterations of: B = 5-point average of A over the interior, then
 * A = B. Do NOT hand-edit the loop bounds: the nested ?: chains are the
 * generator's sign-safe integer floor/ceil divisions, and the statement
 * guards encode the tile-boundary cases of the skewed schedule. */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
    //int t;
    //int i;
    //int j;
    //#pragma scop
    {
        int c0;
        int c1;
        int c3;
        int c2;
        int c4;
        int c5;
        if (n >= 3 && tsteps >= 1) {
            /* c0: sequential time-tile wavefront; c1/c2: parallel space tiles */
            for (c0 = 0; c0 <= (((n + 3 * tsteps + -4) * 16 < 0?((16 < 0?-((-(n + 3 * tsteps + -4) + 16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {
#pragma omp parallel for private(c5, c4, c2, c3)
                for (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {
                    for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 
1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {
                        /* tile-boundary copy-back of the last interior row/column */
                        if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {
                            if ((n + 1) % 2 == 0) {
                                for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {
                                    A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];
                                }
                            }
                        }
                        if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {
                            if ((n + 1) % 2 == 0) {
                                for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {
                                    A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];
                                }
                            }
                        }
                        /* c3: time steps executed inside this tile */
                        for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {
                            if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
                                for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
                                    B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);
                                }
                            }
                            for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {
                                if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
                                    B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);
                                }
                                /* fused stencil (into B) and shifted copy-back (into A) */
                                for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
                                    B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);
                                    A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];
                                }
                                if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
                                    A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];
                                }
                            }
                            if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
                                for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {
                                    A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];
                                }
                            }
                        }
                        /* remaining boundary-row/column stencil updates of the tile */
                        if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {
                            for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {
                                B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);
                            }
                        }
                        if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {
                            for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {
                                B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);
                            }
                        }
                    }
                }
            }
        }
    }
    //#pragma endscop
}
/* Driver: allocate the two 500x500 working arrays through polybench's
 * allocator, initialize them, time the jacobi-2d kernel, and (only under the
 * never-true DCE guard) print the live-out array. */
int main(int argc,char **argv)
{
    /* Retrieve problem size. */
    int n = 500;
    int tsteps = 10;
    /* Variable declaration/allocation. */
    double (*A)[500 + 0][500 + 0] =
        (double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)), (sizeof(double))));
    double (*B)[500 + 0][500 + 0] =
        (double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)), (sizeof(double))));
    /* Initialize array(s). */
    init_array(n, *A, *B);
    /* Start timer. */
    polybench_timer_start();
    /* Run kernel. */
    kernel_jacobi_2d_imper(tsteps, n, *A, *B);
    /* Stop and print timer. */
    polybench_timer_stop();
    polybench_timer_print();
    /* Prevent dead-code elimination: the condition can never hold, but the
     * compiler must keep A live for the potential print. */
    if (argc > 42 && !strcmp(argv[0],""))
        print_array(n, *A);
    /* Be clean. */
    free(((void *)A));
    free(((void *)B));
    return 0;
}
|
interTaskDummy.c | int main (int argc, char * argv[]) {
int x = 0;
#pragma omp parallel
{
if (x > 10) {
}
}
}
|
GB_reduce_to_vector.c | //------------------------------------------------------------------------------
// GB_reduce_to_vector: reduce a matrix to a vector using a binary op
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_build
// C<M> = accum (C,reduce(A)) where C is n-by-1. Reduces a matrix A or A'
// to a vector.
#include "GB_reduce.h"
#include "GB_build.h"
#include "GB_ek_slice.h"
#include "GB_accum_mask.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_FREE_ALL GB_MATRIX_FREE (&T) ;
GrB_Info GB_reduce_to_vector        // C<M> = accum (C,reduce(A))
(
    GrB_Matrix C,                   // input/output for results, size n-by-1
    const GrB_Matrix M,             // optional M for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C,T)
    const GrB_BinaryOp reduce,      // reduce operator for T=reduce(A)
    const GB_void *terminal,        // for early exit (NULL if none)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Descriptor desc,      // descriptor for C, M, and A
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // C may be aliased with M and/or A

    GB_RETURN_IF_NULL_OR_FAULTY (C) ;
    GB_RETURN_IF_FAULTY (M) ;
    GB_RETURN_IF_FAULTY (accum) ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;
    GB_RETURN_IF_FAULTY (desc) ;

    ASSERT_OK (GB_check (C, "C input for reduce_BinaryOp", GB0)) ;
    ASSERT_OK_OR_NULL (GB_check (M, "M for reduce_BinaryOp", GB0)) ;
    ASSERT_OK_OR_NULL (GB_check (accum, "accum for reduce_BinaryOp", GB0)) ;
    ASSERT_OK (GB_check (reduce, "reduce for reduce_BinaryOp", GB0)) ;
    ASSERT_OK (GB_check (A, "A input for reduce_BinaryOp", GB0)) ;
    ASSERT_OK_OR_NULL (GB_check (desc, "desc for reduce_BinaryOp", GB0)) ;

    GrB_Matrix T = NULL ;

    // get the descriptor
    GB_GET_DESCRIPTOR (info, desc, C_replace, Mask_comp, A_transpose, xx1, xx2);

    // C and M are n-by-1 GrB_Vector objects, typecasted to GrB_Matrix
    ASSERT (GB_VECTOR_OK (C)) ;
    ASSERT (GB_IMPLIES (M != NULL, GB_VECTOR_OK (M))) ;

    // check domains and dimensions for C<M> = accum (C,T)
    GrB_Type ttype = reduce->ztype ;
    GB_OK (GB_compatible (C->type, C, M, accum, ttype, Context)) ;

    // check types of reduce
    if (reduce->xtype != reduce->ztype || reduce->ytype != reduce->ztype)
    {
        // all 3 types of z = reduce (x,y) must be the same.  reduce must also
        // be associative but there is no way to check this in general.
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "All domains of reduction operator must be identical;\n"
            "operator is: [%s] = %s ([%s],[%s])", reduce->ztype->name,
            reduce->name, reduce->xtype->name, reduce->ytype->name))) ;
    }

    // T = reduce (T,A) must be compatible
    if (!GB_Type_compatible (A->type, reduce->ztype))
    {
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "incompatible type for reduction operator z=%s(x,y):\n"
            "input matrix A of type [%s]\n"
            "cannot be typecast to reduction operator of type [%s]",
            reduce->name, A->type->name, reduce->ztype->name))) ;
    }

    // check the dimensions
    int64_t n = GB_NROWS (C) ;
    if (A_transpose)
    {
        if (n != GB_NCOLS (A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A'): length of w is "GBd";\n"
                "it must match the number of columns of A, which is "GBd".",
                n, GB_NCOLS (A)))) ;
        }
    }
    else
    {
        if (n != GB_NROWS(A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A): length of w is "GBd";\n"
                "it must match the number of rows of A, which is "GBd".",
                n, GB_NROWS (A)))) ;
        }
    }

    // quick return if an empty mask is complemented
    GB_RETURN_IF_QUICK_MASK (C, C_replace, M, Mask_comp) ;

    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------

    // GB_WAIT (C) ;
    GB_WAIT (M) ;
    GB_WAIT (A) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of A
    //--------------------------------------------------------------------------

    // the result vector T is in CSC format
    if (!(A->is_csc))
    {
        A_transpose = !A_transpose ;
    }

    //--------------------------------------------------------------------------
    // T = reduce (A) or reduce (A')
    //--------------------------------------------------------------------------

    // T is created below so that it can be typecasted to a GrB_Vector when
    // done: non-hypersparse n-by-1 matrix in CSC format.

    // T = reduce_to_vector (A) or reduce_to_vector (A'), which is T = sum (A')
    // or sum (A), in MATLAB notation, except where where 'sum' is any
    // associative operator.

    // By default, T(i) = op (A (i,:)) is a vector whose length is the same as
    // the number of rows of A.  T(i) is the reduction of all entries in the
    // ith row of A.  If A_transpose is true, the T is computed as if A were
    // transposed first, and thus its length is equal to the number of vectors
    // of the input matrix A.  The use of A_transpose is the opposite of
    // MATLAB, since sum(A) in MATLAB sums up the columns of A, and sum(A')
    // sums up the rows of A..

    // T is an n-by-1 GrB_Matrix that represents the vector.  It is computed
    // as a GrB_Matrix so it can be passed to GB_accum_mask without
    // typecasting.

    // BUG FIX: the conditional must be parenthesized.  '==' binds tighter
    // than '?:', so the unparenthesized original asserted
    // ((n == A_transpose) ? A->vdim : A->vlen) -- i.e. it tested a dimension
    // value for nonzero instead of comparing n against the reduced dimension.
    ASSERT (n == ((A_transpose) ? A->vdim : A->vlen)) ;

    //--------------------------------------------------------------------------
    // scalar workspace
    //--------------------------------------------------------------------------

    size_t asize = A->type->size ;
    GB_Type_code acode = A->type->code ;
    const int64_t *restrict Ai = A->i ;
    const GB_void *restrict Ax = A->x ;
    int64_t anvec = A->nvec ;
    int64_t anz = GB_NNZ (A) ;

    size_t zsize = reduce->ztype->size ;
    GB_Type_code zcode = reduce->ztype->code ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // T = reduce(A) or reduce(A')
    //--------------------------------------------------------------------------

    GxB_binary_function freduce = reduce->function ;
    GB_cast_function cast_A_to_Z = GB_cast_factory (zcode, acode) ;
    bool nocasting = (A->type == reduce->ztype) ;

    if (A_transpose)
    {

        //----------------------------------------------------------------------
        // T = reduce(A'), where T(j) = reduce (A (:,j))
        //----------------------------------------------------------------------

        // Each vector A(:,j) is reduced to the scalar T(j)

        //----------------------------------------------------------------------
        // allocate T, including T->p, T->i, and T->x.  T is not hypersparse.
        //----------------------------------------------------------------------

        // since T is a GrB_Vector, it is CSC and not hypersparse
        GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
            GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, anvec, true, Context) ;
        GB_OK (info) ;
        ASSERT (GB_VECTOR_OK (T)) ;
        T->p [0] = 0 ;
        T->p [1] = anvec ;
        int64_t *restrict Ti = T->i ;
        GB_void *restrict Tx = T->x ;
        T->nvec_nonempty = (anvec > 0) ? 1 : 0 ;
        T->magic = GB_MAGIC ;

        //----------------------------------------------------------------------
        // symbolic phase
        //----------------------------------------------------------------------

        // Construct the pattern of T.  The kth vector in A creates one entry
        // in T, but it is flagged as a zombie if it is empty.

        int64_t nzombies = 0 ;
        const int64_t *restrict Ah = A->h ;
        const int64_t *restrict Ap = A->p ;

        int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
        #pragma omp parallel for num_threads(nth) schedule(static) \
            reduction(+:nzombies)
        for (int64_t k = 0 ; k < anvec ; k++)
        {
            // if A(:,j) is empty, then the entry in T becomes a zombie
            int64_t j = (Ah == NULL) ? k : Ah [k] ;
            int64_t jnz = Ap [k+1] - Ap [k] ;
            if (jnz == 0)
            {
                // A(:,j) is empty: T(j) is a zombie
                Ti [k] = GB_FLIP (j) ;
                nzombies++ ;
            }
            else
            {
                // A(:,j) has at least one entry; T(j) is live
                Ti [k] = j ;
            }
        }

        if (A->nvec_nonempty < 0)
        {
            A->nvec_nonempty = anvec - nzombies ;
        }
        ASSERT (A->nvec_nonempty == (anvec - nzombies)) ;
        T->nzombies = nzombies ;

        //----------------------------------------------------------------------
        // slice the entries of A for the numeric phase
        //----------------------------------------------------------------------

        // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
        // and vectors kfirst_slice [tid] to klast_slice [tid].  The first and
        // last vectors may be shared with prior slices and subsequent slices.

        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, anz) ;
        ntasks = GB_IMAX (ntasks, 1) ;
        // NOTE(review): stack VLAs; size is bounded by 8*nthreads so this is
        // presumably safe, but confirm against the project's stack budget.
        int64_t pstart_slice [ntasks+1] ;
        int64_t kfirst_slice [ntasks] ;
        int64_t klast_slice  [ntasks] ;
        GB_ek_slice (pstart_slice, kfirst_slice, klast_slice, A, ntasks) ;

        //----------------------------------------------------------------------
        // numeric phase: launch the switch factory
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            #define GB_red(opname,aname) GB_red_eachvec_ ## opname ## aname
            #define GB_RED_WORKER(opname,aname,atype)                   \
            {                                                           \
                info = GB_red (opname, aname) ((atype *) Tx, A,         \
                    kfirst_slice, klast_slice, pstart_slice, ntasks,    \
                    nthreads) ;                                         \
                done = (info != GrB_NO_VALUE) ;                         \
            }                                                           \
            break ;

            if (nocasting)
            {
                // controlled by opcode and typecode.  No typecasting is done.
                GB_Opcode opcode = reduce->opcode ;
                GB_Type_code typecode = acode ;
                ASSERT (typecode <= GB_UDT_code) ;
                #include "GB_red_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker: with typecasting
        //----------------------------------------------------------------------

        if (!done)
        {
            #define GB_ATYPE GB_void
            #define GB_CTYPE GB_void

            // workspace for each thread
            #define GB_REDUCTION_WORKSPACE(W, ntasks)   \
                GB_void W [ntasks*zsize]

            // ztype s ;
            #define GB_SCALAR(s)                        \
                GB_void s [zsize]

            // ztype s = (ztype) Ax [p], with typecast
            // (FIX: removed a stray trailing '\' here that spliced the next
            // comment line into this macro's definition)
            #define GB_CAST_ARRAY_TO_SCALAR(s,Ax,p)     \
                cast_A_to_Z (s, Ax +((p)*asize), zsize) ;

            // s += (ztype) Ax [p], with typecast
            #define GB_ADD_CAST_ARRAY_TO_SCALAR(s, Ax, p)       \
                GB_void awork [zsize] ;                          \
                cast_A_to_Z (awork, Ax +((p)*asize), zsize) ;    \
                freduce (s, s, awork) ;

            // W [k] = s, no typecast
            #define GB_COPY_SCALAR_TO_ARRAY(W,k,s)      \
                memcpy (W +((k)*zsize), s, zsize) ;

            // W [k] = S [i], no typecast
            #define GB_COPY_ARRAY_TO_ARRAY(W,k,S,i)             \
                memcpy (W +((k)*zsize), S +((i)*zsize), zsize) ;

            // W [k] += S [i], no typecast
            #define GB_ADD_ARRAY_TO_ARRAY(W,k,S,i)                      \
                freduce (W +((k)*zsize), W +((k)*zsize), S +((i)*zsize)) ;

            // W [k] += s, no typecast
            #define GB_ADD_SCALAR_TO_ARRAY(W,k,s)               \
                freduce (W +((k)*zsize), W +((k)*zsize), s) ;

            // break if terminal value reached
            #define GB_BREAK_IF_TERMINAL(t)                     \
                if (terminal != NULL)                           \
                {                                               \
                    if (memcmp (t, terminal, zsize) == 0) break ; \
                }

            #include "GB_reduce_each_vector.c"
        }

        //----------------------------------------------------------------------
        // wrapup: delete any zombies
        //----------------------------------------------------------------------

        // NOTE(review): GB_FLIP(GB0) passes a flipped print level to
        // GB_check -- presumably to tolerate the zombies still in T; confirm.
        ASSERT_OK (GB_check (T, "T before wait", GB_FLIP (GB0)));
        if (nzombies > 0)
        {
            ASSERT (GB_VECTOR_OK (T)) ;
            ASSERT (!GB_PENDING (T)) ;
            ASSERT (GB_ZOMBIES (T)) ;
            GB_OK (GB_wait (T, Context)) ;
        }
        ASSERT_OK (GB_check (T, "T output = reduce_each_vector (A)", GB0)) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // T = reduce(A), where T(i) = reduce (A (i,:))
        //----------------------------------------------------------------------

        //----------------------------------------------------------------------
        // select the method
        //----------------------------------------------------------------------

        // When A_transpose is false (after flipping it to account for the
        // CSR/CSC format), n is A->vlen, the vector length of A.  This is
        // the number of rows of a CSC matrix, or the # of columns of a CSR
        // matrix.  The matrix A itself requires O(vdim+anz) memory if
        // non-hypersparse and O(anz) if hypersparse.  This does not depend on
        // A->vlen.  So if the vector length is really huge (when anz << n),
        // the bucket method would fail.  Thus, the qsort method, below, is
        // used when A is very sparse.

        if (GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, n))
        {

            //------------------------------------------------------------------
            // qsort method
            //------------------------------------------------------------------

            // memory usage is O(anz) and time is O(anz*log(anz)).  This is
            // more efficient than the bucket method, below, when A is very
            // hypersparse.  The time and memory complexity does not depend
            // on n.

            // since T is a GrB_Vector, it is not hypersparse
            GB_NEW (&T, ttype, n, 1, GB_Ap_null, true, GB_FORCE_NONHYPER,
                GB_HYPER_DEFAULT, 1, Context) ;
            GB_OK (info) ;

            // GB_build treats Ai and Ax as read-only; they must not be modified
            GB_OK (GB_build
            (
                T,                  // construct result in the T vector
                (GrB_Index *) Ai,   // indices inside the vector
                NULL,               // vector indices (none)
                Ax,                 // values, of size anz
                anz,                // number of tuples
                reduce,             // reduction operator
                acode,              // type code of the Ax array
                false,              // the input is a vector
                false,              // indices do not need to be checked
                Context
            )) ;

            ASSERT (T->nvec_nonempty == GB_nvec_nonempty (T, NULL)) ;

        }
        else
        {

            //------------------------------------------------------------------
            // bucket method
            //------------------------------------------------------------------

            // Determine number of threads to use for constructing the buckets.
            // Each thread requires O(n) workspace, so this method does not
            // scale well when there are many threads compared to anz.  Total
            // workspace is O(n*nth), so limit the # of threads used so that at
            // most anz workspace is used.  Each thread takes a single task.

            int nth = anz / n ;
            nth = GB_IMIN (nth, nthreads) ;
            nth = GB_IMAX (nth, 1) ;

            //------------------------------------------------------------------
            // slice the entries for each thread
            //------------------------------------------------------------------

            // Thread tid does entries pstart_slice [tid] to
            // pstart_slice [tid+1]-1.  No need to compute kfirst or klast.

            int64_t pstart_slice [nth+1] ;
            GB_eslice (pstart_slice, anz, nth) ;

            //------------------------------------------------------------------
            // sum across each index: T(i) = reduce (A (i,:))
            //------------------------------------------------------------------

            // Early exit cannot be exploited; ignore the terminal value.

            #undef  GB_red
            #define GB_red(opname,aname) GB_red_eachindex_ ## opname ## aname
            #undef  GB_RED_WORKER
            #define GB_RED_WORKER(opname,aname,atype)                       \
            {                                                               \
                info = GB_red (opname, aname) (&T, ttype, A, pstart_slice,  \
                    nth, nthreads, Context) ;                               \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            bool done = false ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            #ifndef GBCOMPACT
                if (nocasting)
                {
                    // controlled by opcode and typecode.  No typecasting
                    GB_Opcode opcode = reduce->opcode ;
                    GB_Type_code typecode = acode ;
                    ASSERT (typecode <= GB_UDT_code) ;
                    #include "GB_red_factory.c"
                    if (! (info == GrB_SUCCESS || info == GrB_NO_VALUE))
                    {
                        // out of memory
                        GB_FREE_ALL ;
                        return (info) ;
                    }
                }
            #endif

            //------------------------------------------------------------------
            // generic worker
            //------------------------------------------------------------------

            if (!done)
            {
                #include "GB_reduce_each_index.c"
            }
        }

        ASSERT_OK (GB_check (T, "T output for T = reduce_each_index (A)", GB0));
    }

    //--------------------------------------------------------------------------
    // C<M> = accum (C,T): accumulate the results into C via the mask
    //--------------------------------------------------------------------------

    return (GB_ACCUM_MASK (C, M, NULL, accum, &T, C_replace, Mask_comp)) ;
}
|
csf.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "csf.h"
#include "sort.h"
#include "tile.h"
#include "util.h"
#include "thread_partition.h"
#include "io.h"
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/* Read a coordinate-format tensor from 'fname', compress it into CSF form,
 * and return it through 'tensors'.  'nmodes' receives the tensor's mode
 * count.  Returns SPLATT_ERROR_BADINPUT if the file cannot be parsed,
 * SPLATT_SUCCESS otherwise. */
int splatt_csf_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_csf ** tensors,
    double const * const options)
{
  sptensor_t * coords = tt_read(fname);
  if(coords == NULL) {
    return SPLATT_ERROR_BADINPUT;
  }

  /* strip empty slices before compressing */
  tt_remove_empty(coords);

  *tensors = csf_alloc(coords, options);
  *nmodes = coords->nmodes;

  /* the coordinate copy is no longer needed */
  tt_free(coords);
  return SPLATT_SUCCESS;
}
/* Wrap caller-supplied coordinate data (inds/vals) in a stack sptensor_t and
 * compress it into CSF form, returned through 'tensors'.  The coordinate
 * arrays are borrowed, not copied; NOTE(review): tt_remove_empty() may
 * modify them in place -- confirm against its definition. */
int splatt_csf_convert(
    splatt_idx_t const nmodes,
    splatt_idx_t const nnz,
    splatt_idx_t ** const inds,
    splatt_val_t * const vals,
    splatt_csf ** tensors,
    double const * const options)
{
  sptensor_t coords;
  tt_fill(&coords, nnz, nmodes, inds, vals);
  tt_remove_empty(&coords);

  *tensors = csf_alloc(&coords, options);
  return SPLATT_SUCCESS;
}
void free_csf(
splatt_csf * tensors,
double const * const options)
{
/* Public API wrapper around csf_free(); 'options' tells csf_free() how many
 * CSF representations are stored in 'tensors' (via SPLATT_OPTION_CSF_ALLOC). */
csf_free(tensors, options);
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Count the nonzeros below a given node in a CSF tensor.
*
* @param fptr The adjacency pointer of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node
* @param fiber The id of the node.
*
* @return The nonzeros below fptr[depth][fiber].
*/
/**
* @brief Count the nonzeros stored beneath node fptr[depth][fiber] of a CSF
*        tensor by chasing its leftmost and rightmost descendants down to
*        the leaf level.
*
* @param fptr The adjacency pointers of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node.
* @param fiber The id of the node.
*
* @return The number of nonzeros below fptr[depth][fiber].
*/
idx_t p_csf_count_nnz(
idx_t * * fptr,
idx_t const nmodes,
idx_t depth,
idx_t const fiber)
{
  /* a node at the leaf level holds exactly one nonzero */
  if(depth == nmodes-1) {
    return 1;
  }

  /* span of this node's children */
  idx_t lo = fptr[depth][fiber];
  idx_t hi = fptr[depth][fiber+1];

  /* push both boundaries down to the level just above the leaves; the
   * nonzeros under this node are exactly those between them */
  while(++depth < nmodes-1) {
    lo = fptr[depth][lo];
    hi = fptr[depth][hi];
  }
  return hi - lo;
}
/**
* @brief Find a permutation of modes that results in non-increasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
/**
* @brief Fill 'perm_dims' with the mode ids ordered by ascending mode size
*        (smallest dimension first).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_small(
idx_t const * const dims,
idx_t const nmodes,
idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  quicksort(sorted, nmodes);

  /* Map each sorted size back to the first unmatched mode with that size.
   * O(nmodes^2), but nmodes is tiny.  TODO: a key/value sort would be nicer. */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    for(idx_t m=0; m < nmodes; ++m) {
      if(!matched[m] && sorted[pos] == dims[m]) {
        perm_dims[pos] = m;
        matched[m] = 1;
        break;
      }
    }
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom-mode'
* and the remaining are naturally ordered (0, 1, ...).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
/**
* @brief Fill 'perm_dims' with 'custom_mode' first and the remaining modes
*        in their natural order (0, 1, ...).
*
* @param dims The tensor dimensions (unused beyond the signature).
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_inorder(
idx_t const * const dims,
idx_t const nmodes,
idx_t const custom_mode,
idx_t * const perm_dims)
{
  /* start from the identity permutation */
  for(idx_t m=0; m < nmodes; ++m) {
    perm_dims[m] = m;
  }

  /* locate custom_mode, shift everything before it right by one slot, and
   * install custom_mode at the front */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    if(perm_dims[pos] == custom_mode) {
      memmove(perm_dims + 1, perm_dims, pos * sizeof(pos));
      perm_dims[0] = custom_mode;
      break;
    }
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom-mode'
* and the remaining are sorted in non-increasing order.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
/**
* @brief Fill 'perm_dims' with 'custom_mode' first and the remaining modes
*        ordered by size (as produced by p_order_dims_small()).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_minusone(
idx_t const * const dims,
idx_t const nmodes,
idx_t const custom_mode,
idx_t * const perm_dims)
{
  /* size-sorted base ordering */
  p_order_dims_small(dims, nmodes, perm_dims);

  /* locate custom_mode, shift everything before it right by one slot, and
   * install custom_mode at the front */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    if(perm_dims[pos] == custom_mode) {
      memmove(perm_dims + 1, perm_dims, pos * sizeof(pos));
      perm_dims[0] = custom_mode;
      break;
    }
  }
}
/**
* @brief Find a permutation of modes that results in non-decreasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
/**
* @brief Fill 'perm_dims' with the mode ids ordered by descending mode size
*        (largest dimension first).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_large(
idx_t const * const dims,
idx_t const nmodes,
idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }

  /* quicksort() orders small -> large; reverse in place for large -> small */
  quicksort(sorted, nmodes);
  for(idx_t m=0; m < nmodes/2; ++m) {
    idx_t const other = nmodes - m - 1;
    idx_t const tmp = sorted[other];
    sorted[other] = sorted[m];
    sorted[m] = tmp;
  }

  /* Map each sorted size back to the first unmatched mode with that size.
   * O(nmodes^2), but nmodes is tiny.  TODO: a key/value sort would be nicer. */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    for(idx_t m=0; m < nmodes; ++m) {
      if(!matched[m] && sorted[pos] == dims[m]) {
        perm_dims[pos] = m;
        matched[m] = 1;
        break;
      }
    }
  }
}
/**
* @brief Construct the sparsity structure of the outer-mode of a CSF tensor.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
*/
static void p_mk_outerptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr)
{
/* Builds fptr[0]/fids[0] for one tile: nonzeros are assumed sorted, so each
 * run of equal top-level indices is one root fiber.  Two parallel passes:
 * (1) each thread counts fibers in its nonzero range, (2) after a prefix sum
 * over per-thread counts, each thread writes its fibers at its offset. */
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
assert(nnzstart < nnzend);
idx_t const nnz = nnzend - nnzstart;
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* grab top-level indices, offset into this tile's nonzeros */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, 0)];
/* partition nonzeros among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(nnz, nthreads);
/* thread_nfibs[t+1] holds thread t's fiber count; prefix-summed below */
idx_t * thread_nfibs = malloc((nthreads+1) * sizeof(*thread_nfibs));
/* Fibers are counted by differing indices -- count at least one fiber */
thread_nfibs[0] = 1;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
/* first nonzero has no predecessor to compare against, so skip it; the
 * implicit leading fiber is the '1' seeded into thread_nfibs[0] */
idx_t const nnz_start = SS_MAX(thread_parts[tid], 1); /* skip first nz */
idx_t const nnz_end = thread_parts[tid+1];
/* pass 1: count fibers (index changes) in this thread's partition */
idx_t local_nfibs = 0;
for(idx_t x=nnz_start; x < nnz_end; ++x) {
assert(ttind[x-1] <= ttind[x]);
if(ttind[x] != ttind[x-1]) {
++local_nfibs;
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -- thread_nfibs[t] becomes thread t's write
 * offset and thread_nfibs[nthreads] the total fiber count */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
ct->pt[tile_id].nfibs[0] = nfibs;
assert(nfibs <= ct->dims[csf_depth_to_mode(ct, 0)]);
pt->fptr[0] = malloc((nfibs+1) * sizeof(**(pt->fptr)));
/* only store top-level fids if we are tiling or there are gaps */
if((ct->ntiles > 1) || (tt->dims[csf_depth_to_mode(ct, 0)] != nfibs)) {
pt->fids[0] = malloc(nfibs * sizeof(**(pt->fids)));
pt->fids[0][0] = ttind[0];
} else {
pt->fids[0] = NULL;
}
pt->fptr[0][0] = 0;
pt->fptr[0][nfibs] = nnz;
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[0];
idx_t * const restrict fi = pt->fids[0];
/* pass 2: go back over non-zeros and mark fptr and fids, each thread
 * starting at its prefix-sum offset */
idx_t nfound = thread_nfibs[tid];
if(fi == NULL) {
/* dense top level: fiber ids are implicit */
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fp[nfound++] = n;
}
}
} else {
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fi[nfound] = ttind[n];
fp[nfound++] = n;
}
}
}
} /* end omp parallel */
free(thread_parts);
free(thread_nfibs);
}
/**
* @brief Construct the sparsity structure of any mode but the last. The first
* (root) mode is handled by p_mk_outerptr and the first is simply a copy
* of the nonzeros.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
* @param mode Which mode we are constructing.
*/
static void p_mk_fptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr,
idx_t const mode)
{
/* Builds fptr[mode]/fids[mode] for one tile from the previous level's
 * fptr[mode-1].  Mode 0 is delegated to p_mk_outerptr().  Same two-pass
 * count / prefix-sum / fill scheme as p_mk_outerptr(), except here the
 * previous level's fptr is rewritten in place to point at the new fiber
 * ids rather than at nonzeros. */
assert(mode < ct->nmodes);
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
idx_t const nnz = nnzend - nnzstart;
/* outer mode is easy; just look at outer indices */
if(mode == 0) {
p_mk_outerptr(ct, tt, tile_id, nnztile_ptr);
return;
}
/* the mode after accounting for dim_perm */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, mode)];
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* we will edit this to point to the new fiber idxs instead of nnz */
idx_t * const restrict fprev = pt->fptr[mode-1];
/* partition the previous level's fibers ("slices") among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(pt->nfibs[mode-1], nthreads);
idx_t * thread_nfibs = malloc((nthreads+1) * sizeof(*thread_nfibs));
thread_nfibs[0] = 0;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
idx_t const slice_start = thread_parts[tid];
idx_t const slice_end = thread_parts[tid+1];
/* first count nfibers */
/* foreach 'slice' in the previous dimension */
idx_t local_nfibs = 0;
for(idx_t s=slice_start; s < slice_end; ++s) {
++local_nfibs; /* one by default per 'slice' */
/* count fibers in current hyperplane*/
for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) {
if(ttind[f] != ttind[f-1]) {
++local_nfibs;
}
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
/* capture before the fill pass overwrites fprev in place */
idx_t const fprev_end = fprev[slice_end];
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -- thread_nfibs[t] becomes thread t's write
 * offset and thread_nfibs[nthreads] the total fiber count */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
pt->nfibs[mode] = nfibs;
pt->fptr[mode] = malloc((nfibs+1) * sizeof(**(pt->fptr)));
pt->fptr[mode][0] = 0;
pt->fids[mode] = malloc(nfibs * sizeof(**(pt->fids)));
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[mode];
idx_t * const restrict fi = pt->fids[mode];
/* now fill in fiber info, each thread writing at its prefix-sum offset */
idx_t nfound = thread_nfibs[tid];
for(idx_t s=slice_start; s < slice_end; ++s) {
idx_t const start = fprev[s]+1;
/* the last slice's end was captured above because a neighboring
 * iteration may already have rewritten fprev[s+1] */
idx_t const end = (s == slice_end - 1) ? fprev_end : fprev[s+1];
/* mark start of subtree: fprev now points at fiber ids, not nonzeros */
fprev[s] = nfound;
fi[nfound] = ttind[start-1];
fp[nfound++] = start-1;
/* mark fibers in current hyperplane */
for(idx_t f=start; f < end; ++f) {
if(ttind[f] != ttind[f-1]) {
fi[nfound] = ttind[f];
fp[nfound++] = f;
}
}
}
/* mark end of last hyperplane */
if(tid == nthreads - 1) {
fprev[pt->nfibs[mode-1]] = thread_nfibs[nthreads];
fp[thread_nfibs[nthreads]] = nnz;
}
} /* end omp parallel */
free(thread_parts);
free(thread_nfibs);
}
/**
* @brief Allocate and fill a CSF tensor from a coordinate tensor without
* tiling.
*
* @param ct The CSF tensor to fill out.
* @param tt The sparse tensor to start from.
*/
/**
* @brief Allocate and fill a CSF tensor from a coordinate tensor without
*        tiling.  Sorts 'tt' in place according to ct->dim_perm.
*
* @param ct The CSF tensor to fill out.
* @param tt The sparse tensor to start from.
*/
static void p_csf_alloc_untiled(
splatt_csf * const ct,
sptensor_t * const tt)
{
  idx_t const nmodes = tt->nmodes;

  /* sort nonzeros lexicographically by the chosen mode permutation */
  tt_sort(tt, ct->dim_perm[0], ct->dim_perm);

  /* a single tile covers the whole tensor */
  ct->ntiles = 1;
  ct->ntiled_modes = 0;
  for(idx_t m=0; m < nmodes; ++m) {
    ct->tile_dims[m] = 1;
  }
  ct->pt = malloc(sizeof(*(ct->pt)));

  csf_sparsity * const pt = ct->pt;

  /* leaf level: one fiber per nonzero; copy its indices and values */
  idx_t const leaf = nmodes-1;
  pt->nfibs[leaf] = ct->nnz;
  pt->fids[leaf] = malloc(ct->nnz * sizeof(**(pt->fids)));
  pt->vals = malloc(ct->nnz * sizeof(*(pt->vals)));
  par_memcpy(pt->fids[leaf], tt->ind[csf_depth_to_mode(ct, leaf)],
      ct->nnz * sizeof(**(pt->fids)));
  par_memcpy(pt->vals, tt->vals, ct->nnz * sizeof(*(pt->vals)));

  /* trivial nonzero pointer for the single tile */
  idx_t nnz_ptr[2];
  nnz_ptr[0] = 0;
  nnz_ptr[1] = tt->nnz;

  /* build fptr for every level above the leaves, working down from the
   * root.  The leaf level (nnz) was handled above. */
  for(idx_t m=0; m < tt->nmodes-1; ++m) {
    p_mk_fptr(ct, tt, 0, nnz_ptr, m);
  }
}
/**
* @brief Reorder the nonzeros in a sparse tensor using dense tiling and fill
* a CSF tensor with the data.
*
* @param ct The CSF tensor to fill.
* @param tt The sparse tensor to start from.
* @param splatt_opts Options array for SPLATT - used for tile dimensions.
*/
static void p_csf_alloc_densetile(
splatt_csf * const ct,
sptensor_t * const tt,
double const * const splatt_opts)
{
/* Reorders tt's nonzeros into dense tiles and fills one csf_sparsity per
 * tile.  Sorts and retiles 'tt' in place. */
idx_t const nmodes = tt->nmodes;
/* how many levels we tile (counting from the bottom) */
ct->ntiled_modes = (idx_t)splatt_opts[SPLATT_OPTION_TILELEVEL];
ct->ntiled_modes = SS_MIN(ct->ntiled_modes, ct->nmodes);
/* how many levels from the root do we start tiling? */
idx_t const tile_depth = ct->nmodes - ct->ntiled_modes;
/* tiled modes get NTHREADS tiles each; untiled modes get one */
idx_t ntiles = 1;
for(idx_t m=0; m < nmodes; ++m) {
idx_t const depth = csf_mode_to_depth(ct, m);
if(depth >= tile_depth) {
ct->tile_dims[m] = (idx_t) splatt_opts[SPLATT_OPTION_NTHREADS];
} else {
ct->tile_dims[m] = 1;
}
ntiles *= ct->tile_dims[m];
}
/* perform tensor tiling: sort, then partition nonzeros into tiles.
 * nnz_ptr[t] marks the first nonzero of tile t. */
tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
idx_t * nnz_ptr = tt_densetile(tt, ct->tile_dims);
ct->ntiles = ntiles;
ct->pt = malloc(ntiles * sizeof(*(ct->pt)));
for(idx_t t=0; t < ntiles; ++t) {
idx_t const startnnz = nnz_ptr[t];
idx_t const endnnz = nnz_ptr[t+1];
idx_t const ptnnz = endnnz - startnnz;
csf_sparsity * const pt = ct->pt + t;
/* empty tile: NULL everything except a 2-entry root fptr */
if(ptnnz == 0) {
for(idx_t m=0; m < ct->nmodes; ++m) {
pt->fptr[m] = NULL;
pt->fids[m] = NULL;
pt->nfibs[m] = 0;
}
/* first fptr may be accessed anyway */
pt->fptr[0] = (idx_t *) malloc(2 * sizeof(**(pt->fptr)));
pt->fptr[0][0] = 0;
pt->fptr[0][1] = 0;
pt->vals = NULL;
continue;
}
idx_t const leaves = nmodes-1;
/* last row of fptr is just nonzero inds -- copy this tile's slice of
 * the leaf indices and values */
pt->nfibs[leaves] = ptnnz;
pt->fids[leaves] = malloc(ptnnz * sizeof(**(pt->fids)));
par_memcpy(pt->fids[leaves], tt->ind[csf_depth_to_mode(ct, leaves)] + startnnz,
ptnnz * sizeof(**(pt->fids)));
pt->vals = malloc(ptnnz * sizeof(*(pt->vals)));
par_memcpy(pt->vals, tt->vals + startnnz, ptnnz * sizeof(*(pt->vals)));
/* create fptr entries for the rest of the modes, root downward */
for(idx_t m=0; m < leaves; ++m) {
p_mk_fptr(ct, tt, t, nnz_ptr, m);
}
}
free(nnz_ptr);
}
/**
* @brief Construct dim_iperm, which is the inverse of dim_perm.
*
* @param ct The CSF tensor.
*/
/**
* @brief Construct dim_iperm as the inverse of dim_perm, so that
*        dim_iperm[mode] gives the depth at which that mode is stored.
*
* @param ct The CSF tensor.
*/
static void p_fill_dim_iperm(
splatt_csf * const ct)
{
  for(idx_t depth=0; depth < ct->nmodes; ++depth) {
    idx_t const mode = ct->dim_perm[depth];
    ct->dim_iperm[mode] = depth;
  }
}
/**
* @brief Allocate and fill a CSF tensor.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from.
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine tiling scheme.
*/
/**
* @brief Allocate and fill a CSF tensor from 'tt', dispatching on the tiling
*        scheme found in the options array.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from (sorted in place).
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine the tiling scheme.
*/
static void p_mk_csf(
splatt_csf * const ct,
sptensor_t * const tt,
csf_mode_type mode_type,
idx_t const mode,
double const * const splatt_opts)
{
  /* copy basic shape information */
  ct->nnz = tt->nnz;
  ct->nmodes = tt->nmodes;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    ct->dims[m] = tt->dims[m];
  }

  /* choose the mode permutation and compute its inverse */
  csf_find_mode_order(tt->dims, tt->nmodes, mode_type, mode, ct->dim_perm);
  p_fill_dim_iperm(ct);

  /* dispatch on the requested tiling scheme */
  ct->which_tile = splatt_opts[SPLATT_OPTION_TILE];
  switch(ct->which_tile) {
  case SPLATT_NOTILE:
    p_csf_alloc_untiled(ct, tt);
    break;
  case SPLATT_DENSETILE:
    p_csf_alloc_densetile(ct, tt, splatt_opts);
    break;
  default:
    fprintf(stderr, "SPLATT: tiling '%d' unsupported for CSF tensors.\n",
        ct->which_tile);
    break;
  }
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Free every CSF representation in 'csf' and then the array itself.  The
 * number of representations stored is derived from the CSF allocation
 * option, mirroring how csf_alloc() sized the array. */
void csf_free(
splatt_csf * const csf,
double const * const opts)
{
  splatt_csf_type const which = opts[SPLATT_OPTION_CSF_ALLOC];

  idx_t ntensors;
  switch(which) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = csf[0].nmodes;
    break;
  default:
    /* unknown allocation type: free nothing but the array itself */
    ntensors = 0;
    break;
  }

  for(idx_t i=0; i < ntensors; ++i) {
    csf_free_mode(csf + i);
  }
  free(csf);
}
/*
 * Release the sparsity pattern of a single CSF representation: every
 * tile's values, fiber pointers, and fiber ids, then the tile array
 * itself. The splatt_csf struct is not freed here.
 */
void csf_free_mode(
    splatt_csf * const csf)
{
  idx_t const nmodes = csf->nmodes;
  for(idx_t tile=0; tile < csf->ntiles; ++tile) {
    csf_sparsity * const pt = csf->pt + tile;
    free(pt->vals);
    /* the leaf level has fids but no fptr */
    free(pt->fids[nmodes-1]);
    for(idx_t m=0; m < nmodes-1; ++m) {
      free(pt->fptr[m]);
      free(pt->fids[m]);
    }
  }
  free(csf->pt);
}
/*
 * Fill 'perm_dims' with a permutation of the nmodes modes according to
 * the ordering strategy 'which'. 'mode' is only consulted by the
 * *_MINUSONE strategies, which give one mode special placement.
 */
void csf_find_mode_order(
idx_t const * const dims,
idx_t const nmodes,
csf_mode_type which,
idx_t const mode,
idx_t * const perm_dims)
{
switch(which) {
/* shortest mode first */
case CSF_SORTED_SMALLFIRST:
p_order_dims_small(dims, nmodes, perm_dims);
break;
/* longest mode first */
case CSF_SORTED_BIGFIRST:
p_order_dims_large(dims, nmodes, perm_dims);
break;
/* natural order, with 'mode' treated specially */
case CSF_INORDER_MINUSONE:
p_order_dims_inorder(dims, nmodes, mode, perm_dims);
break;
/* sorted order, with 'mode' treated specially */
case CSF_SORTED_MINUSONE:
p_order_dims_minusone(dims, nmodes, mode, perm_dims);
break;
/* no-op, perm_dims better be set... */
case CSF_MODE_CUSTOM:
break;
default:
fprintf(stderr, "SPLATT: csf_mode_type '%d' not recognized.\n", which);
break;
}
}
/*
 * Return the total number of bytes consumed by the sparsity structures
 * of the CSF group 'tensors' (vals, leaf fids, tile array, and the
 * per-level fptr/fids arrays of every tile). The number of tensors in
 * the group is derived from the allocation scheme in 'opts'.
 *
 * Fixes: the inner per-level loop reused 'm', shadowing the outer
 * tensor index 'm' (renamed to 'depth'); added a default switch case.
 */
size_t csf_storage(
    splatt_csf const * const tensors,
    double const * const opts)
{
  idx_t ntensors = 0;
  splatt_csf_type which_alloc = opts[SPLATT_OPTION_CSF_ALLOC];
  switch(which_alloc) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = tensors[0].nmodes;
    break;
  default:
    /* unknown allocation scheme -- report zero bytes */
    break;
  }

  size_t bytes = 0;
  for(idx_t m=0; m < ntensors; ++m) {
    splatt_csf const * const ct = tensors + m;
    bytes += ct->nnz * sizeof(*(ct->pt->vals)); /* vals */
    bytes += ct->nnz * sizeof(**(ct->pt->fids)); /* fids[nmodes] */
    bytes += ct->ntiles * sizeof(*(ct->pt)); /* pt */

    for(idx_t t=0; t < ct->ntiles; ++t) {
      csf_sparsity const * const pt = ct->pt + t;
      /* 'depth' was 'm' -- it shadowed the outer tensor index */
      for(idx_t depth=0; depth < ct->nmodes-1; ++depth) {
        bytes += (pt->nfibs[depth]+1) * sizeof(**(pt->fptr)); /* fptr */
        if(pt->fids[depth] != NULL) {
          bytes += pt->nfibs[depth] * sizeof(**(pt->fids)); /* fids */
        }
      }
    }
  }
  return bytes;
}
/*
 * Allocate and fill one, two, or nmodes CSF representations of 'tt',
 * depending on opts[SPLATT_OPTION_CSF_ALLOC]. Returns a heap array of
 * splatt_csf (ownership transfers to the caller; free with csf_free()),
 * or NULL on allocation failure or an unrecognized scheme.
 *
 * Fixes: the malloc() results were passed straight to p_mk_csf()
 * without a NULL check (NULL dereference on OOM); removed the unused
 * local 'int tmp'.
 */
splatt_csf * csf_alloc(
    sptensor_t * const tt,
    double const * const opts)
{
  splatt_csf * ret = NULL;
  double * tmp_opts = NULL;
  idx_t last_mode = 0;

  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ret = malloc(sizeof(*ret));
    if(ret == NULL) {
      return NULL;
    }
    p_mk_csf(ret, tt, CSF_SORTED_SMALLFIRST, 0, opts);
    break;

  case SPLATT_CSF_TWOMODE:
    ret = malloc(2 * sizeof(*ret));
    if(ret == NULL) {
      return NULL;
    }
    /* regular CSF allocation */
    p_mk_csf(ret + 0, tt, CSF_SORTED_SMALLFIRST, 0, opts);
    /* make a copy of opts and don't tile the last mode
     * TODO make this configurable? */
    tmp_opts = splatt_default_opts();
    memcpy(tmp_opts, opts, SPLATT_OPTION_NOPTIONS * sizeof(*opts));
    tmp_opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;
    /* allocate with no tiling for the last mode */
    last_mode = csf_depth_to_mode(&(ret[0]), tt->nmodes-1);
    p_mk_csf(ret + 1, tt, CSF_SORTED_MINUSONE, last_mode, tmp_opts);
    free(tmp_opts);
    break;

  case SPLATT_CSF_ALLMODE:
    ret = malloc(tt->nmodes * sizeof(*ret));
    if(ret == NULL) {
      return NULL;
    }
    for(idx_t m=0; m < tt->nmodes; ++m) {
      p_mk_csf(ret + m, tt, CSF_SORTED_MINUSONE, m, opts);
    }
    break;
  }

  return ret;
}
/*
 * Fill an already-allocated CSF tensor 'csf' from the coordinate tensor
 * 'tt', ordering modes with 'which_ordering'. 'mode_special' is only
 * consulted by the *_MINUSONE orderings. Thin wrapper over p_mk_csf().
 */
void csf_alloc_mode(
sptensor_t * const tt,
csf_mode_type which_ordering,
idx_t const mode_special,
splatt_csf * const csf,
double const * const opts)
{
p_mk_csf(csf, tt, which_ordering, mode_special, opts);
}
/*
 * Compute the squared Frobenius norm of 'tensor': the sum of squares of
 * all nonzero values, over every tile.
 */
val_t csf_frobsq(
splatt_csf const * const tensor)
{
/* accumulate into double to help with some precision loss */
double norm = 0;
/* Every thread walks the full tile list; the 'omp for ... nowait' below
 * splits each tile's nonzeros among threads without a per-tile barrier.
 * Safe because iterations are independent and the reduction combines
 * the partial sums at the end of the parallel region. */
#pragma omp parallel reduction(+:norm)
{
for(idx_t t=0; t < tensor->ntiles; ++t) {
val_t const * const vals = tensor->pt[t].vals;
/* tiles can be empty (vals == NULL); skip them */
if(vals == NULL) {
continue;
}
/* number of nonzeros in this tile == fibers at the leaf level */
idx_t const nnz = tensor->pt[t].nfibs[tensor->nmodes-1];
#pragma omp for schedule(static) nowait
for(idx_t n=0; n < nnz; ++n) {
norm += vals[n] * vals[n];
}
}
} /* end omp parallel */
return (val_t) norm;
}
/*
 * Partition the slices of one tile into 'nparts' contiguous chunks,
 * weighting each slice by its nonzero count. Returns the heap-allocated
 * partitioning from partition_weighted(); caller owns it.
 */
idx_t * csf_partition_1d(
    splatt_csf const * const csf,
    idx_t const tile_id,
    idx_t const nparts)
{
  idx_t const nslices = csf->pt[tile_id].nfibs[0];
  idx_t * slice_weights = malloc(nslices * sizeof(*slice_weights));

  /* weight of a slice = nonzeros beneath it */
  #pragma omp parallel for schedule(static)
  for(idx_t slice=0; slice < nslices; ++slice) {
    slice_weights[slice] =
        p_csf_count_nnz(csf->pt[tile_id].fptr, csf->nmodes, 0, slice);
  }

  idx_t bottleneck;
  idx_t * parts =
      partition_weighted(slice_weights, nslices, nparts, &bottleneck);
  free(slice_weights);

  return parts;
}
/*
 * Partition the tiles of 'csf' into 'nparts' contiguous chunks,
 * weighting each tile by its nonzero count. Returns the heap-allocated
 * partitioning from partition_weighted(); caller owns it.
 */
idx_t * csf_partition_tiles_1d(
    splatt_csf const * const csf,
    idx_t const nparts)
{
  idx_t const nmodes = csf->nmodes;
  idx_t const ntiles = csf->ntiles;

  idx_t * tile_weights = malloc(ntiles * sizeof(*tile_weights));

  /* weight of a tile = its leaf-level fiber count (== nonzeros) */
  #pragma omp parallel for schedule(static)
  for(idx_t tile=0; tile < ntiles; ++tile) {
    tile_weights[tile] = csf->pt[tile].nfibs[nmodes-1];
  }

  idx_t bottleneck;
  idx_t * parts =
      partition_weighted(tile_weights, ntiles, nparts, &bottleneck);
  free(tile_weights);

  return parts;
}
|
ac2d_openmp.c | #include<stdio.h>
#include"2d_lib.c"
// vx
/*
 * One time step of a 2-D staggered-grid finite-difference update
 * (velocity fields vx/vz, then pressure tpp), parallelized with OpenMP.
 * NOTE(review): the bound_* helpers appear to apply absorbing-boundary
 * updates over the 'ext' outer cells using the bhalf/ahalf/bfull/afull
 * coefficient arrays (CPML-style) -- confirm against 2d_lib.c.
 *
 * Arrays are column-major: element (i,j) of a field with BD_nz_* rows
 * is *(field + i + j*BD_nz_*). 'rho' is density, 'lambda' the modulus,
 * 'fdc' the finite-difference coefficients, dt/dx/dz the step sizes.
 */
void ac2d_openmp(double *vx, int BD_nx_vx, int BD_nz_vx,
double *pvxbtpp,
double *vz, int BD_nx_vz, int BD_nz_vz,
double *pvzbtpp,
double *tpp, double *ptppbvx, double *ptppbvz,
int BD_nx_tpp, int BD_nz_tpp,
double *rho, double *lambda, double *fdc,
double dt, double dx, double dz, int ext,
double *bhalf, double *ahalf, double *bfull, double *afull)
{
int i,j;
//********************* V_X *********************//
#pragma omp parallel
{
// vxbtxx
/* 'i' is the associated loop variable (implicitly private); 'j' must be
 * declared private explicitly. 'nowait' is safe: vx and vz updates
 * touch disjoint fields, and the explicit barrier below orders them
 * before the pressure updates that read both. */
#pragma omp for private(j) nowait
for(i=0; i<BD_nz_vx; i++)
{
/* left/right boundary columns (1..ext-1 and mirrored) */
for(j=1; j<ext; j++)
{
bound_x_2d(vx, BD_nz_vx, BD_nx_vx, i, j,
tpp, BD_nz_tpp, BD_nx_tpp, 1,
2.0/(*(rho+i+j*BD_nz_tpp)+*(rho+i+(j+1)*BD_nz_tpp)),
2.0/(*(rho+i+(BD_nx_vx-1-j)*BD_nz_tpp)+*(rho+i+(BD_nx_vx-j)*BD_nz_tpp)),
dx, dt, pvxbtpp, bhalf, ahalf, ext, fdc);
}
/* interior columns: 2/(rho_j + rho_{j+1}) is 1/rho averaged at the
 * staggered vx location */
for(j=ext; j<BD_nx_vx-ext; j++)
{
body_x_2d(vx, BD_nz_vx, BD_nx_vx, i, j,
tpp, BD_nz_tpp, BD_nx_tpp, 1,
2.0/(*(rho+i+j*BD_nz_tpp)+*(rho+i+(j+1)*BD_nz_tpp)),
dx, dt, fdc);
}
}
//********************* V_Z *********************//
// vzbtzz
#pragma omp for private(i) nowait
for(j=0; j<BD_nx_vz; j++)
{
/* top/bottom boundary rows */
for(i=1;i<ext;i++)
{
unlimited_bound_z_2d(vz, BD_nz_vz, BD_nx_vz, i, j,
tpp, BD_nz_tpp, BD_nx_tpp, 1,
2.0/(*(rho+i+j*BD_nz_tpp)+*(rho+(i+1)+j*BD_nz_tpp)),
2.0/(*(rho+(BD_nz_vz-1-i)+j*BD_nz_tpp)+*(rho+(BD_nz_vz-i)+j*BD_nz_tpp)),
dz,dt, pvzbtpp, bhalf, ahalf, ext, fdc);
}
/* interior rows */
for(i=ext; i<BD_nz_vz-ext; i++)
{
body_z_2d(vz, BD_nz_vz, BD_nx_vz, i, j,
tpp, BD_nz_tpp, BD_nx_tpp, 1,
2.0/(*(rho+i+j*BD_nz_tpp)+*(rho+(i+1)+j*BD_nz_tpp)),
dz,dt, fdc);
}
}
/* all velocity updates must complete before pressure reads them */
#pragma omp barrier
//************ T_X ************//
#pragma omp for private(j)
for(i=0; i<BD_nz_tpp; i++)
{
for(j=2; j<ext; j++)
{
ac_bound_tpp_x_2d(tpp, BD_nz_tpp, BD_nx_tpp,
i, j, vx, BD_nz_vx, BD_nx_vx, 2, lambda, dx, dt,
ptppbvx, bfull, afull, ext, fdc);
}
for(j=ext; j<BD_nx_tpp-ext; j++)
{
ac_body_tpp_x_2d(tpp, BD_nz_tpp, BD_nx_tpp,
i, j, vx, BD_nz_vx, BD_nx_vx, 2, lambda, dx, dt, fdc);
}
}
//************ T_Z ************//
/* implicit barrier of the T_X loop orders it before T_Z, which updates
 * the same tpp field */
#pragma omp for private(i)
for(j=0; j<BD_nx_tpp; j++)
{
for(i=2; i<ext; i++)
{
ac_unlimited_bound_tpp_z_2d(tpp, BD_nz_tpp, BD_nx_tpp,
i, j, vz, BD_nz_vz, BD_nx_vz, 2, lambda, dz, dt,
ptppbvz, bfull, afull, ext, fdc);
}
for(i=ext; i<BD_nz_tpp-ext; i++)
{
ac_body_tpp_z_2d(tpp, BD_nz_tpp, BD_nx_tpp,
i, j, vz, BD_nz_vz, BD_nx_vz, 2, lambda, dz, dt, fdc);
}
}
}
}
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const ChannelType channel,const DrawInfo *draw_info,
% const MagickPixelPacket *target,const ssize_t x_offset,
% const ssize_t y_offset,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
 * Scan-line flood fill (classic seed-fill, segment-stack variant).
 * Phase 1 marks the connected region on a cloned "floodplane" image by
 * setting matched pixels transparent; phase 2 paints the fill color
 * onto 'image' wherever the floodplane is non-opaque. Returns MagickTrue
 * only if phase 2 visited every row.
 */
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const ChannelType channel,const DrawInfo *draw_info,
const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
const MagickBooleanType invert)
{
#define MaxStacksize 524288UL
/* Push a horizontal segment [left,right] on row 'up' whose neighbor row
 * up+delta (delta is +1 or -1) still needs scanning. Requires locals
 * 's' (stack top) and 'segment_stack' in scope; rejects rows outside
 * the image and throws on stack overflow. */
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
ExceptionInfo
*exception;
Image
*floodplane_image;
MagickBooleanType
skip;
MagickPixelPacket
fill,
pixel;
MemoryInfo
*segment_info;
PixelPacket
fill_color;
SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
exception=(&image->exception);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace);
/* painting a translucent fill requires an alpha channel */
if ((image->matte == MagickFalse) &&
(draw_info->fill.opacity != OpaqueOpacity))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Set floodfill state.
*/
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
if (segment_info == (MemoryInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
/*
Push initial segment on stack.
*/
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
GetMagickPixelPacket(image,&fill);
GetMagickPixelPacket(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
/* phase 1: mark the connected region on the floodplane */
while (s > segment_stack)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
/* scan left from x1: transparent opacity marks "already visited" */
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) == invert)
break;
q->opacity=(Quantum) TransparentOpacity;
p--;
q--;
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
/* nothing matched left of x1: resume scanning rightward only */
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
/* scan right, marking matches until a mismatch or the edge */
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
image->columns-x,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) == invert)
break;
q->opacity=(Quantum) TransparentOpacity;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
}
/* schedule the neighbor rows of the span just marked */
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
/* skip over mismatching pixels up to x2 before the next span */
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) != invert)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
/* phase 2: paint the fill color wherever the floodplane was marked */
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
/*
Tile fill color onto floodplane.
*/
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* non-opaque floodplane pixel == inside the flooded region */
if (GetPixelOpacity(p) != OpaqueOpacity)
{
(void) GetFillColor(draw_info,x,y,&fill_color);
SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&fill);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(fill.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(fill.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(fill.blue));
if (((channel & OpacityChannel) != 0) ||
(draw_info->fill.opacity != OpaqueOpacity))
SetPixelOpacity(q,ClampToQuantum(fill.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_info=RelinquishVirtualMemory(segment_info);
floodplane_image=DestroyImage(floodplane_image);
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transition along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transition.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelPacket *start_color,
% const PixelPacket *stop_color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread method: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% This provides a good example of making use of the DrawGradientImage
% function and the gradient structure in draw_info.
%
*/
/*
 * Draw a linear or radial two-stop gradient from start_color to
 * stop_color across 'image'. The gradient geometry can be overridden
 * via the "gradient:*" image artifacts (bounding-box, direction, angle,
 * vector, center, extent, radii).
 *
 * Fixes: SouthGravity set gradient_vector.y2 from image->columns-1; a
 * south (top-to-bottom) vector spans the rows, matching NorthGravity.
 * Also destroy draw_info before throwing on stop-allocation failure to
 * avoid leaking it.
 */
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /*
    Default vector: top-left to bottom-right.
  */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /* was image->columns-1: a vertical vector spans the rows */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /* with no geometry hints, a linear gradient defaults to vertical */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /* destroy draw_info first so it is not leaked by the throw */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread histogram buffers and then the pointer array
  itself. Always returns NULL so callers can re-assign in one step.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  ssize_t
    i,
    number_threads;

  assert(histogram != (size_t **) NULL);
  /* hoist the invariant resource-limit lookup out of the loop */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
  {
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  }
  return((size_t **) RelinquishMagickMemory(histogram));
}
/*
  Allocate one histogram of 'count' bins per worker thread. The pointer
  array is zeroed first so a partial failure can be unwound safely by
  DestroyHistogramThreadSet(). Returns NULL on failure.
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  size_t
    **histograms,
    number_threads;

  ssize_t
    i;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histograms=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histograms));
  if (histograms == (size_t **) NULL)
    return((size_t **) NULL);
  (void) memset(histograms,0,number_threads*sizeof(*histograms));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    histograms[i]=(size_t *) AcquireQuantumMemory(count,
      sizeof(**histograms));
    if (histograms[i] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histograms));
  }
  return(histograms);
}
/*
 * Simulate an oil painting: each output pixel takes the value of the
 * most frequent intensity bin in the circular neighborhood (width
 * derived from 'radius') around the corresponding input pixel. Returns
 * a new image (caller owns it) or NULL on failure.
 *
 * Fix: the clone-failure cleanup assigned DestroyImage(paint_image) to
 * linear_image, leaving paint_image dangling; it now assigns back to
 * paint_image.
 */
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **magick_restrict histograms,
    width;

  ssize_t
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,0.5);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);  /* was: linear_image=... */
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&paint_image->exception);
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    IndexPacket
      *magick_restrict paint_indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    size_t
      *histogram;

    if (status == MagickFalse)
      continue;
    /* read a width-tall band centered on row y (virtual pixels pad the
       edges) */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        v;

      /*
        Assign most frequent color.
      */
      i=0;
      j=0;
      count=0;
      (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          /* bin pixels by 8-bit intensity; j tracks the offset of the
             pixel whose bin is currently the most populated */
          k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+u+i)));
          histogram[k]++;
          if (histogram[k] > count)
            {
              j=i+u;
              count=histogram[k];
            }
        }
        i+=(ssize_t) (linear_image->columns+width);
      }
      *q=(*(p+j));
      if (linear_image->colorspace == CMYKColorspace)
        SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OilPaintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  /* DestroyImage() returns NULL, so a failed run returns NULL */
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,
% const MagickPixelPacket *target,const MagickPixelPacket *fill,
% const MagickBooleanType invert)
% MagickBooleanType OpaquePaintImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *target,
% const MagickPixelPacket *fill,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
 * Convenience wrapper: recolor matching pixels on the default composite
 * channels by delegating to OpaquePaintImageChannel().
 */
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const MagickPixelPacket *target,const MagickPixelPacket *fill,
const MagickBooleanType invert)
{
return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}
/*
 * OpaquePaintImageChannel(): for every pixel similar to `target' (subject to
 * the image fuzz factor; the test is inverted when `invert' is MagickTrue),
 * overwrite the selected channels with the corresponding components of
 * `fill'.  Rows are processed in parallel when OpenMP is available; any
 * per-row failure clears `status' and the remaining rows bail out early.
 * Returns MagickTrue on success.
 */
MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
const ChannelType channel,const MagickPixelPacket *target,
const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
conform_fill,
conform_target,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (MagickPixelPacket *) NULL);
assert(fill != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Pixels are modified in place, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
exception=(&image->exception);
/* Convert fill/target into the image's colorspace/matte representation. */
ConformMagickPixelPacket(image,fill,&conform_fill,exception);
ConformMagickPixelPacket(image,target,&conform_target,exception);
/*
Make image color opaque.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
/* A previous row failed: skip remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
/* Fuzzy match against the target; `invert' flips the sense. */
if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(conform_fill.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(conform_fill.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
/* The index channel carries black (K) only for CMYK images. */
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
 * TransparentPaintImage(): set the opacity of every pixel similar to
 * `target' (fuzzy match; sense flipped by `invert') to `opacity'.
 * An alpha channel is added first if the image has none.  Rows are
 * processed in parallel when OpenMP is available.  Returns MagickTrue
 * on success.
 */
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const MagickPixelPacket *target,const Quantum opacity,
const MagickBooleanType invert)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Pixels are modified in place, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* Ensure a matte channel exists before writing opacity values. */
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
/* A previous row failed: skip remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) != invert)
q->opacity=opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is only one fuzz value for all the channels, the
% TransparentPaintImage() API is not suitable for operations like chroma,
% where the tolerance for similarity of the color components (RGB) can be
% different.  Thus this method takes two target pixels (one
% low and one high), and all the pixels of the image lying between
% these two pixels are made transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const MagickPixelPacket *low,const MagickPixelPacket *high,
% const Quantum opacity,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
 * TransparentPaintImageChroma(): set the opacity of every pixel whose
 * red, green and blue components all lie within [low, high] (sense flipped
 * by `invert') to `opacity'.  Unlike TransparentPaintImage() the match is a
 * plain per-channel range test, so each channel effectively gets its own
 * tolerance.  Returns MagickTrue on success.
 */
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const MagickPixelPacket *low,const MagickPixelPacket *high,
const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(high != (MagickPixelPacket *) NULL);
assert(low != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Pixels are modified in place, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* NOTE(review): this resets alpha rather than making it opaque as
TransparentPaintImage() does -- verify this asymmetry is intended. */
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,ResetAlphaChannel);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
/* A previous row failed: skip remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
GetMagickPixelPacket(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
/* In-range test on each RGB component independently. */
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse;
if (match != invert)
q->opacity=opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two struct timeval values.
 * Following the classic GNU libc example, *y is normalized in place so
 * that the resulting tv_usec is guaranteed non-negative.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    int carry;

    /* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }

    /* Fold excess microseconds back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
critical-unrelated.c | /*
* critical-unrelated.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) { /* Archer testcase: deliberate data race on var */
int var = 0; /* shared counter, intentionally unprotected */
#pragma omp parallel num_threads(2) shared(var)
{
#pragma omp critical
{
// Dummy region.
}
var++; /* the race: increment happens OUTSIDE the critical section (line checked by FileCheck) */
}
fprintf(stderr, "DONE\n"); /* matched by the trailing CHECK: DONE directive */
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
fill_ints.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <assert.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#define INTBUFMAX 1000
#define INTBUFMAX10 8000
#define IMGBLK 80
#define OF_CMPLX 2
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
 * Partition the shell range [ksh0, ksh1) into contiguous segments whose
 * total AO dimension (per ao_loc) does not exceed dkmax.  Segment
 * boundaries are written to kshloc (kshloc[0] = ksh0, ...,
 * kshloc[nseg] = ksh1) and the number of segments is returned.
 */
static int shloc_partition(int *kshloc, int *ao_loc, int ksh0, int ksh1, int dkmax)
{
    int nseg = 0;
    int seg_start_ao = ao_loc[ksh0];
    int sh;

    kshloc[0] = ksh0;
    for (sh = ksh0 + 1; sh < ksh1; sh++) {
        /* every individual shell must fit into one buffer segment */
        assert(ao_loc[sh + 1] - ao_loc[sh] < dkmax);
        if (ao_loc[sh + 1] - seg_start_ao > dkmax) {
            /* current segment would overflow: start a new one at sh */
            nseg++;
            kshloc[nseg] = sh;
            seg_start_ao = ao_loc[sh];
        }
    }
    nseg++;
    kshloc[nseg] = ksh1;
    return nseg;
}
/*
 * Translate the coordinates stored at env[ptr..ptr+2] by lattice vector
 * Ls[iL] (stored as consecutive x,y,z triples), writing the shifted
 * coordinates into env_loc at the same offset.
 */
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
    const double *lvec = Ls + 3 * iL;
    int d;
    for (d = 0; d < 3; d++) {
        env_loc[ptr + d] = env[ptr + d] + lvec[d];
    }
}
/*
 * sort3c_kks1: combine the separately-contracted real (bufr) and imaginary
 * (bufi) parts of the 3-center integrals for shell pair (ish,jsh) and the
 * k-shell block [msh0,msh1) into the complex output tensor, once for every
 * requested (ki,kj) k-point pair listed in kptij_idx.
 * For each pair, out is addressed as [comp][i][j][k] with i,j,k the AO
 * offsets of the full shell slices (strides nijk, njk, naok, 1).
 */
static void sort3c_kks1(double complex *out, double *bufr, double *bufi,
int *kptij_idx, int *shls_slice, int *ao_loc,
int nkpts, int nkpts_ij, int comp, int ish, int jsh,
int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
/* total AO counts of the three shell slices */
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t njk = naoj * naok;
const size_t nijk = njk * naoi;
/* AO dimensions and offsets of this particular (ish,jsh) pair */
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
/* size of one k-point's buffer slab */
const size_t dijmc = dij * dkmax * comp;
out += (ip * naoj + jp) * naok;
int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
size_t off;
double *pbr, *pbi;
double complex *pout;
for (kk = 0; kk < nkpts_ij; kk++) {
/* decode the (ki, kj) pair index */
ik = kptij_idx[kk] / nkpts;
jk = kptij_idx[kk] % nkpts;
off = (ik*nkpts+jk) * dijmc;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
pbr = bufr + off + dijk*ic;
pbi = bufi + off + dijk*ic;
/* buffer layout is [k][j][i] (i fastest); transpose into out's
 * [i][j][k] layout while forming the complex value */
for (j = 0; j < dj; j++) {
for (k = 0; k < dk; k++) {
for (i = 0; i < di; i++) {
pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I;
} }
pout += naok;
pbr += di;
pbi += di;
}
}
off += dijk * comp;
}
out += nijk * comp;
}
}
/*
 * _nr3c_fill_kk: evaluate the lattice-summed 3-center integrals
 * (ish jsh | ksh) for one shell pair over all image pairs (iL, jL), then
 * contract against the phase factors expkL (split into real expkL_r and
 * imaginary expkL_i parts) for every pair of k-points via dgemm, and hand
 * the accumulated real/imaginary buffers to fsort for unpacking into out.
 * The k-shells are processed in segments sized by shloc_partition so the
 * work buffer `buf' (carved into bufkk/bufkL/bufL/cache below) never
 * exceeds INTBUFMAX per dij.
 */
static void _nr3c_fill_kk(int (*intor)(), void (*fsort)(),
double complex *out, int nkpts_ij,
int nkpts, int comp, int nimgs, int ish, int jsh,
double *buf, double *env_loc, double *Ls,
double *expkL_r, double *expkL_i, int *kptij_idx,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const char TRANS_N = 'N';
const double D0 = 0;
const double D1 = 1;
const double ND1 = -1;
/* convert slice-relative shell indices to absolute indices */
jsh += jsh0;
ish += ish0;
/* offsets of the two shell centers' coordinates inside env */
int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
int dkmax = INTBUFMAX / dij;
int kshloc[ksh1-ksh0+1];
int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
int i, m, msh0, msh1, dijm, dijmk;
int ksh, dk, iL0, iL1, iL, jL, iLcount, empty;
int shls[3];
double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *cache;
shls[0] = ish;
shls[1] = jsh;
for (m = 0; m < nkshloc; m++) {
msh0 = kshloc[m];
msh1 = kshloc[m+1];
dkmax = ao_loc[msh1] - ao_loc[msh0];
dijm = dij * dkmax * comp;
dijmk = dijm * nkpts;
/* carve buf: kk accumulators, per-image-block kL buffers, raw bufL,
 * and scratch cache for the integral engine */
bufkk_r = buf;
bufkk_i = bufkk_r + (size_t)nkpts * dijmk;
bufkL_r = bufkk_i + (size_t)nkpts * dijmk;
bufkL_i = bufkL_r + (size_t)MIN(nimgs,IMGBLK) * dijmk;
bufL = bufkL_i + (size_t)MIN(nimgs,IMGBLK) * dijmk;
cache = bufL + (size_t)nimgs * dijm;
/* zero both kk accumulators (real + imaginary, hence OF_CMPLX) */
for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) {
bufkk_r[i] = 0;
}
/* loop over i-images in blocks of IMGBLK */
for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
iLcount = MIN(IMGBLK, nimgs - iL0);
for (iL = iL0; iL < iL0+iLcount; iL++) {
shift_bas(env_loc, env, Ls, iptrxyz, iL);
pbuf = bufL;
/* inner sum over j-images at fixed i-image */
for (jL = 0; jL < nimgs; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
for (ksh = msh0; ksh < msh1; ksh++) {
shls[2] = ksh;
if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
env_loc, cintopt, cache)) {
empty = 0;
}
dk = ao_loc[ksh+1] - ao_loc[ksh];
pbuf += dij*dk * comp;
}
}
/* contract the jL axis with expkL -> bufkL for this iL */
dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
&D1, bufL, &dijm, expkL_r, &nimgs,
&D0, bufkL_r+(iL-iL0)*(size_t)dijmk, &dijm);
dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
&D1, bufL, &dijm, expkL_i, &nimgs,
&D0, bufkL_i+(iL-iL0)*(size_t)dijmk, &dijm);
} // iL in range(0, nimgs)
// conj(exp(1j*dot(h,k)))
/* accumulate the iL axis with the conjugate phases: the four dgemm
 * calls realize complex multiply-accumulate on split re/im parts */
dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
&D1, bufkL_r, &dijmk, expkL_r+iL0, &nimgs,
&D1, bufkk_r, &dijmk);
dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
&D1, bufkL_i, &dijmk, expkL_i+iL0, &nimgs,
&D1, bufkk_r, &dijmk);
dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
&D1, bufkL_i, &dijmk, expkL_r+iL0, &nimgs,
&D1, bufkk_i, &dijmk);
dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
&ND1, bufkL_r, &dijmk, expkL_i+iL0, &nimgs,
&D1, bufkk_i, &dijmk);
}
/* unpack this k-shell segment into the complex output */
(*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice,
ao_loc, nkpts, nkpts_ij, comp, ish, jsh,
msh0, msh1);
}
}
/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */
/*
 * PBCnr3c_fill_kks1: s1 (no-symmetry) driver for the k-point-pair fill.
 * Simply runs _nr3c_fill_kk with the s1 sorter for this shell pair.
 */
void PBCnr3c_fill_kks1(int (*intor)(), double complex *out, int nkpts_ij,
int nkpts, int comp, int nimgs, int ish, int jsh,
double *buf, double *env_loc, double *Ls,
double *expkL_r, double *expkL_i, int *kptij_idx,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
_nr3c_fill_kk(intor, &sort3c_kks1, out,
nkpts_ij, nkpts, comp, nimgs, ish, jsh,
buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
/*
 * sort3c_kks2_igtj: s2-symmetry sorter for ish > jsh.  For every (ki,kj)
 * k-point pair it writes the (i,j) block from buffer slab (ik,jk), and
 * simultaneously fills the mirrored (j,i) block from slab (jk,ik) with the
 * complex conjugate, exploiting the hermiticity of the integral tensor.
 * Requires equal i and j AO dimensions (asserted below).
 */
static void sort3c_kks2_igtj(double complex *out, double *bufr, double *bufi,
int *kptij_idx, int *shls_slice, int *ao_loc,
int nkpts, int nkpts_ij, int comp, int ish, int jsh,
int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
/* the mirror trick only works for a square (i,j) AO range */
assert(naoi == naoj);
const size_t njk = naoj * naok;
const size_t nijk = njk * naoi;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
const size_t dijmc = dij * dkmax * comp;
/* direct (i,j) block and mirrored (j,i) block base addresses */
double complex *outij = out + (ip * naoj + jp) * naok;
double complex *outji = out + (jp * naoj + ip) * naok;
int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
size_t offij, offji;
double *pbij_r, *pbij_i, *pbji_r, *pbji_i;
double complex *poutij, *poutji;
for (kk = 0; kk < nkpts_ij; kk++) {
ik = kptij_idx[kk] / nkpts;
jk = kptij_idx[kk] % nkpts;
/* buffer slabs for the (ik,jk) pair and its transpose (jk,ik) */
offij = (ik*nkpts+jk) * dijmc;
offji = (jk*nkpts+ik) * dijmc;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
poutij = outij + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
poutji = outji + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
pbij_r = bufr + offij + dijk*ic;
pbij_i = bufi + offij + dijk*ic;
pbji_r = bufr + offji + dijk*ic;
pbji_i = bufi + offji + dijk*ic;
for (j = 0; j < dj; j++) {
for (k = 0; k < dk; k++) {
for (i = 0; i < di; i++) {
/* direct element, and conjugated mirror element */
poutij[i*njk +k] = pbij_r[k*dij+i] + pbij_i[k*dij+i]*_Complex_I;
poutji[i*naok+k] = pbji_r[k*dij+i] - pbji_i[k*dij+i]*_Complex_I;
} }
poutij += naok;
poutji += njk;
pbij_r += di;
pbij_i += di;
pbji_r += di;
pbji_i += di;
}
}
offij += dijk * comp;
offji += dijk * comp;
}
outij += nijk * comp;
outji += nijk * comp;
}
}
/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */
/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL)
 * s2-symmetry driver: only shell pairs with absolute index ip >= jp are
 * computed (strictly lower triangle uses the mirroring igtj sorter, the
 * diagonal uses the plain s1 sorter); upper-triangle calls are no-ops.
 */
void PBCnr3c_fill_kks2(int (*intor)(), double complex *out, int nkpts_ij,
                       int nkpts, int comp, int nimgs, int ish, int jsh,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i, int *kptij_idx,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ip = ish + shls_slice[0];
        const int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                /* upper triangle: recovered from (jp,ip) by symmetry */
                return;
        }
        if (ip > jp) {
                _nr3c_fill_kk(intor, &sort3c_kks2_igtj, out,
                              nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                              buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                              shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        } else {
                _nr3c_fill_kk(intor, &sort3c_kks1, out,
                              nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                              buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                              shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
}
/*
 * sort3c_ks1: single-k-point-index variant of the sorter.  Combines the
 * real (bufr) and imaginary (bufi) integral buffers for shell pair
 * (ish,jsh) and k-shell block [msh0,msh1) into the complex output, once
 * per k-point (the buffer is indexed by a single k index, not a pair).
 */
static void sort3c_ks1(double complex *out, double *bufr, double *bufi,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int ish, int jsh, int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t njk = naoj * naok;
const size_t nijk = njk * naoi;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
/* size of one k-point's buffer slab */
const size_t dijmc = dij * dkmax * comp;
out += (ip * naoj + jp) * naok;
int i, j, k, kk, ksh, ic, dk, dijk;
size_t off;
double *pbr, *pbi;
double complex *pout;
for (kk = 0; kk < nkpts; kk++) {
off = kk * dijmc;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
pbr = bufr + off + dijk*ic;
pbi = bufi + off + dijk*ic;
/* buffer layout is [k][j][i] (i fastest); transpose to [i][j][k] */
for (j = 0; j < dj; j++) {
for (k = 0; k < dk; k++) {
for (i = 0; i < di; i++) {
pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I;
} }
pout += naok;
pbr += di;
pbi += di;
}
}
off += dijk * comp;
}
out += nijk * comp;
}
}
/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/*
 * _nr3c_fill_k: like _nr3c_fill_kk but with both bra and ket carrying the
 * SAME k-point (diagonal k-pairs).  For each i-image iL the effective
 * phase conj(expkL[:,iL]) * expkL is formed element-wise into
 * bufexp_{r,i}, the j-image axis is contracted with dgemm, and the
 * accumulated real/imaginary buffers are handed to fsort per k-shell
 * segment.
 */
static void _nr3c_fill_k(int (*intor)(), void (*fsort)(),
double complex *out, int nkpts_ij,
int nkpts, int comp, int nimgs, int ish, int jsh,
double *buf, double *env_loc, double *Ls,
double *expkL_r, double *expkL_i, int *kptij_idx,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const char TRANS_N = 'N';
const double D1 = 1;
/* convert slice-relative shell indices to absolute indices */
jsh += jsh0;
ish += ish0;
int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
int dkmax = INTBUFMAX10 / dij;
int kshloc[ksh1-ksh0+1];
int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
int i, j, m, msh0, msh1, dijm;
size_t dijmk;
int ksh, dk, iL0, iL1, iL, jL, iLcount, empty;
int shls[3];
/* carve buf: per-iL phase products, then k-accumulators/raw buffer */
double *bufexp_r = buf;
double *bufexp_i = bufexp_r + nimgs * nkpts;
double *bufk_r = bufexp_i + nimgs * nkpts;
double *bufk_i, *bufL, *pbuf, *cache;
shls[0] = ish;
shls[1] = jsh;
for (m = 0; m < nkshloc; m++) {
msh0 = kshloc[m];
msh1 = kshloc[m+1];
dkmax = ao_loc[msh1] - ao_loc[msh0];
dijm = dij * dkmax * comp;
dijmk = dijm * nkpts;
bufk_i = bufk_r + dijmk;
bufL = bufk_i + dijmk;
cache = bufL + nimgs * dijm;
/* zero the real+imaginary accumulators (hence OF_CMPLX) */
for (i = 0; i < dijmk*OF_CMPLX; i++) {
bufk_r[i] = 0;
}
for (iL = 0; iL < nimgs; iL++) {
shift_bas(env_loc, env, Ls, iptrxyz, iL);
pbuf = bufL;
/* inner sum over j-images at fixed i-image */
for (jL = 0; jL < nimgs; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
for (ksh = msh0; ksh < msh1; ksh++) {
shls[2] = ksh;
if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
env_loc, cintopt, cache)) {
empty = 0;
}
dk = ao_loc[ksh+1] - ao_loc[ksh];
pbuf += dij*dk * comp;
}
}
// ('k,kL->kL', conj(expkL[iL]), expkL)
for (i = 0; i < nkpts; i++) {
for (j = 0; j < nimgs; j++) {
bufexp_r[i*nimgs+j] = expkL_r[i*nimgs+j] * expkL_r[i*nimgs+iL];
bufexp_r[i*nimgs+j]+= expkL_i[i*nimgs+j] * expkL_i[i*nimgs+iL];
bufexp_i[i*nimgs+j] = expkL_i[i*nimgs+j] * expkL_r[i*nimgs+iL];
bufexp_i[i*nimgs+j]-= expkL_r[i*nimgs+j] * expkL_i[i*nimgs+iL];
} }
/* contract the jL axis with the combined phases, accumulating */
dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
&D1, bufL, &dijm, bufexp_r, &nimgs, &D1, bufk_r, &dijm);
dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
&D1, bufL, &dijm, bufexp_i, &nimgs, &D1, bufk_i, &dijm);
} // iL in range(0, nimgs)
/* unpack this k-shell segment into the complex output */
(*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc,
nkpts, comp, ish, jsh, msh0, msh1);
}
}
/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/*
 * PBCnr3c_fill_ks1: s1 (no-symmetry) driver for the diagonal-k fill.
 * Simply runs _nr3c_fill_k with the s1 sorter for this shell pair.
 */
void PBCnr3c_fill_ks1(int (*intor)(), double complex *out, int nkpts_ij,
int nkpts, int comp, int nimgs, int ish, int jsh,
double *buf, double *env_loc, double *Ls,
double *expkL_r, double *expkL_i, int *kptij_idx,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
_nr3c_fill_k(intor, sort3c_ks1, out,
nkpts_ij, nkpts, comp, nimgs, ish, jsh,
buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
/*
 * sort3c_ks2_igtj: s2-symmetry sorter (ish > jsh) for the diagonal-k fill.
 * The (i,j) AO pair index is stored triangularly (i*(i+1)/2 + j), so the
 * output stride grows with the absolute i index; the full dj columns of
 * each row are written since ish > jsh guarantees i > j for all elements.
 */
static void sort3c_ks2_igtj(double complex *out, double *bufr, double *bufi,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int ish, int jsh, int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
/* triangular pair-index offset of the first AO in the i slice */
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijk = nij * naok;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
const size_t dijmc = dij * dkmax * comp;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
int i, j, k, ij, kk, ik, jk, ksh, ic, dk, dijk;
size_t off;
double *pbr, *pbi;
double complex *pout;
for (kk = 0; kk < nkpts; kk++) {
off = kk * dijmc;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
pbr = bufr + off + dijk*ic;
pbi = bufi + off + dijk*ic;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
ij = j * di + i;
for (k = 0; k < dk; k++) {
pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
}
}
/* advance to row i+1 of the triangular storage */
pout += (i+ao_loc[ish]+1) * naok;
}
}
off += dijk * comp;
}
out += nijk * comp;
}
}
/*
 * sort3c_ks2_ieqj: s2-symmetry sorter for the diagonal shell pair
 * (ish == jsh).  Identical to sort3c_ks2_igtj except that within the
 * square di x dj block only the lower triangle (j <= i) is stored, to
 * match the triangular (i,j) pair-index layout.
 */
static void sort3c_ks2_ieqj(double complex *out, double *bufr, double *bufi,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int ish, int jsh, int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
/* triangular pair-index offset of the first AO in the i slice */
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijk = nij * naok;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
const size_t dijmc = dij * dkmax * comp;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
int i, j, k, ij, kk, ik, jk, ksh, ic, dk, dijk;
size_t off;
double *pbr, *pbi;
double complex *pout;
for (kk = 0; kk < nkpts; kk++) {
off = kk * dijmc;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
pbr = bufr + off + dijk*ic;
pbi = bufi + off + dijk*ic;
for (i = 0; i < di; i++) {
/* only j <= i: lower triangle of the diagonal block */
for (j = 0; j <= i; j++) {
ij = j * di + i;
for (k = 0; k < dk; k++) {
pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
}
}
/* advance to row i+1 of the triangular storage */
pout += (i+ao_loc[ish]+1) * naok;
}
}
off += dijk * comp;
}
out += nijk * comp;
}
}
/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL)
 * s2-symmetry driver for the diagonal-k fill: only shell pairs with
 * absolute index ip >= jp are computed (ip > jp uses the igtj sorter,
 * ip == jp the triangular-diagonal sorter); other calls are no-ops.
 */
void PBCnr3c_fill_ks2(int (*intor)(), double complex *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ip = ish + shls_slice[0];
        const int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                /* upper triangle: recovered from (jp,ip) by symmetry */
                return;
        }
        if (ip > jp) {
                _nr3c_fill_k(intor, &sort3c_ks2_igtj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        } else {
                _nr3c_fill_k(intor, &sort3c_ks2_ieqj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
}
/*
 * sort3c_gs1: real (gamma-point) s1 sorter.  Copies the integral buffer
 * `in' for shell pair (ish,jsh) and k-shell block [msh0,msh1) into the
 * output tensor, transposing from the buffer's [k][j][i] layout
 * (i fastest) to out's [comp][i][j][k] layout.
 */
static void sort3c_gs1(double *out, double *in, int *shls_slice, int *ao_loc,
int comp, int ish, int jsh, int msh0, int msh1)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t njk = naoj * naok;
const size_t nijk = njk * naoi;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dij = di * dj;
const int dkmax = ao_loc[msh1] - ao_loc[msh0];
const size_t dijmc = dij * dkmax * comp;
out += (ip * naoj + jp) * naok;
int i, j, k, ij, ksh, ic, dk, dijk;
double *pin, *pout;
for (ksh = msh0; ksh < msh1; ksh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dijk = dij * dk;
for (ic = 0; ic < comp; ic++) {
pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
pin = in + dijk * ic;
for (j = 0; j < dj; j++) {
for (i = 0; i < di; i++) {
for (k = 0; k < dk; k++) {
pout[i*njk+k] = pin[k*dij+i];
} }
pout += naok;
pin += di;
}
}
in += dijk * comp;
}
}
/*
 * Lattice-sum worker for the gamma-point (real) 3-center integrals.
 * For the shell pair (ish, jsh), loop over all image pairs (iL, jL),
 * evaluate the integral block for each k shell of the current batch and
 * accumulate the results in bufL; the summed buffer is then reordered
 * into `out` by *fsort.
 *
 * nkpts_ij, nkpts, expkL_r, expkL_i and kptij_idx are unused here; they
 * are kept so every fill routine shares one signature for the driver.
 *
 * Cleanup: removed the unused locals naok, j and dijm.
 */
static void _nr3c_fill_g(int (*intor)(), void (*fsort)(), double *out, int nkpts_ij,
                         int nkpts, int comp, int nimgs, int ish, int jsh,
                         double *buf, double *env_loc, double *Ls,
                         double *expkL_r, double *expkL_i, int *kptij_idx,
                         int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        jsh += jsh0;
        ish += ish0;
        /* offsets of the two shell centers in the env coordinate table */
        int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        /* partition the k shells into batches that fit the work buffer */
        int dkmax = INTBUFMAX10 / dij / 2 * MIN(IMGBLK,nimgs);
        int kshloc[ksh1-ksh0+1];
        int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
        int i, m, msh0, msh1;
        int ksh, dk, iL, jL, dijkc;
        int shls[3];
        int dijmc = dij * dkmax * comp;
        double *bufL = buf + dijmc;    /* image-summed accumulator */
        double *cache = bufL + dijmc;  /* scratch for the integral engine */
        double *pbuf;
        shls[0] = ish;
        shls[1] = jsh;
        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijmc = dij * dkmax * comp;
                for (i = 0; i < dijmc; i++) {
                        bufL[i] = 0;
                }
                for (iL = 0; iL < nimgs; iL++) {
                        shift_bas(env_loc, env, Ls, iptrxyz, iL);
                        for (jL = 0; jL < nimgs; jL++) {
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                pbuf = bufL;
                                for (ksh = msh0; ksh < msh1; ksh++) {
                                        shls[2] = ksh;
                                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                                        dijkc = dij*dk * comp;
                                        /* accumulate only when intor reports a
                                         * non-zero block */
                                        if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas,
                                                     env_loc, cintopt, cache)) {
                                                for (i = 0; i < dijkc; i++) {
                                                        pbuf[i] += buf[i];
                                                }
                                        }
                                        pbuf += dijkc;
                                }
                        }
                } // iL in range(0, nimgs)
                (*fsort)(out, bufL, shls_slice, ao_loc, comp, ish, jsh, msh0, msh1);
        }
}
/* ('...LM->...', int3c) */
void PBCnr3c_fill_gs1(int (*intor)(), double *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        /* s1: no i/j symmetry, every shell pair is sorted with the plain kernel */
        void (*fsort)() = &sort3c_gs1;
        _nr3c_fill_g(intor, fsort, out, nkpts_ij, nkpts, comp, nimgs,
                     ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i,
                     kptij_idx, shls_slice, ao_loc, cintopt,
                     atm, natm, bas, nbas, env);
}
/*
 * Pack one off-diagonal (ish > jsh) shell-pair block of 3-center
 * integrals into the triangular-packed (i >= j) output array out[ij, k],
 * where ij runs over the packed lower triangle of (i, j) AO pairs and k
 * over the AOs of the current k-shell batch msh0..msh1.
 * `in` is in the engine's [comp, k, j, i] layout (i fastest).
 *
 * Cleanup: removed the unused locals `out0` and `jsh1`.
 */
static void sort3c_gs2_igtj(double *out, double *in, int *shls_slice, int *ao_loc,
                            int comp, int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        /* number of packed ij pairs preceding this i-shell slice */
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;
        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                /* packed row (i0+i) holds (i0+i+1) ij pairs */
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}
/*
 * Pack one diagonal (ish == jsh) shell-pair block of 3-center integrals
 * into the triangular-packed (i >= j) output array out[ij, k].  Only the
 * j <= i sub-triangle of the square di x di block is stored.
 * `in` is in the engine's [comp, k, j, i] layout (i fastest).
 *
 * Cleanup: removed the unused locals `out0` and `jsh1`.
 */
static void sort3c_gs2_ieqj(double *out, double *in, int *shls_slice, int *ao_loc,
                            int comp, int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        /* number of packed ij pairs preceding this i-shell slice */
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dij = di * di;   /* square block: di == dj on the diagonal */
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;
        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                /* packed row (i0+i) holds (i0+i+1) ij pairs */
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}
/* ('...LM->...', int3c) */
void PBCnr3c_fill_gs2(int (*intor)(), double *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        /* Absolute shell ids: jsh is relative to the j slice, which starts
         * nbas past the i shells in the concatenated basis. */
        const int i_abs = ish + shls_slice[0];
        const int j_abs = jsh + shls_slice[2] - nbas;
        void (*fsort)();

        if (i_abs > j_abs) {
                fsort = &sort3c_gs2_igtj;
        } else if (i_abs == j_abs) {
                fsort = &sort3c_gs2_ieqj;
        } else {
                /* i < j: nothing is written for this pair */
                return;
        }
        _nr3c_fill_g(intor, fsort, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                     buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                     shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
/*
 * Return the minimal length of the env array that covers the atom
 * coordinates, exponents and contraction coefficients referenced by the
 * shells shls_slice[0] .. shls_slice[1].  natm, nbas and env are unused
 * but kept for a uniform call signature.
 */
int PBCsizeof_env(int *shls_slice,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        const int sh_start = shls_slice[0];
        const int sh_stop = shls_slice[1];
        int nenv = 0;
        int sh;
        for (sh = sh_start; sh < sh_stop; sh++) {
                const int ia = bas[ATOM_OF + sh*BAS_SLOTS];
                const int nprim = bas[NPRIM_OF + sh*BAS_SLOTS];
                const int nctr = bas[NCTR_OF + sh*BAS_SLOTS];
                int end;
                end = atm[PTR_COORD + ia*ATM_SLOTS] + 3;  /* x,y,z of the center */
                if (end > nenv) nenv = end;
                end = bas[PTR_EXP + sh*BAS_SLOTS] + nprim;
                if (end > nenv) nenv = end;
                end = bas[PTR_COEFF + sh*BAS_SLOTS] + nprim * nctr;
                if (end > nenv) nenv = end;
        }
        return nenv;
}
/*
 * Common OpenMP driver for the lattice-summed 3-center integral fill
 * routines.  It splits the complex Bloch phase table expkL into separate
 * real/imaginary arrays, sizes the per-thread work buffer according to
 * which fill kernel is used, and distributes the (ish, jsh) shell pairs
 * over threads; each thread calls *fill with private env and buffers.
 * NOTE(review): the malloc results below are unchecked; an allocation
 * failure would crash in the initialization loops.
 */
void PBCnr3c_drv(int (*intor)(), void (*fill)(), double complex *eri,
int nkpts_ij, int nkpts, int comp, int nimgs,
double *Ls, double complex *expkL, int *kptij_idx,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
const int nksh = ksh1 - ksh0;
/* split the complex phases into separate real and imaginary tables */
double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
double *expkL_i = expkL_r + nimgs*nkpts;
int i;
for (i = 0; i < nimgs*nkpts; i++) {
expkL_r[i] = creal(expkL[i]);
expkL_i[i] = cimag(expkL[i]);
}
/* count = number of doubles each thread needs for its integral buffers */
size_t count;
if (fill == &PBCnr3c_fill_kks1 || fill == &PBCnr3c_fill_kks2) {
int dijk =(GTOmax_shell_dim(ao_loc, shls_slice+0, 1) *
GTOmax_shell_dim(ao_loc, shls_slice+2, 1) *
GTOmax_shell_dim(ao_loc, shls_slice+4, 1));
count = nkpts*nkpts * OF_CMPLX +
nkpts*MIN(nimgs,IMGBLK) * OF_CMPLX + nimgs;
// MAX(INTBUFMAX, dijk) to ensure buffer is enough for at least one (i,j,k) shell
count*= MAX(INTBUFMAX, dijk) * comp;
} else {
count = (nkpts * OF_CMPLX + nimgs) * INTBUFMAX10 * comp;
count+= nimgs * nkpts * OF_CMPLX;
}
const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
atm, natm, bas, nbas, env);
/* cache_size, nish, njsh are const-qualified and therefore predetermined
 * shared under default(none) (OpenMP data-sharing rules) */
#pragma omp parallel default(none) \
shared(intor, fill, eri, nkpts_ij, nkpts, comp, nimgs, \
Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, \
atm, natm, bas, nbas, env, count)
{
int ish, jsh, ij, i;
/* each thread copies env so shift_bas can mutate coordinates privately */
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
nenv = MAX(nenv, PBCsizeof_env(shls_slice+4, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv);
memcpy(env_loc, env, sizeof(double)*nenv);
double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
ish = ij / njsh;
jsh = ij % njsh;
(*fill)(intor, eri, nkpts_ij, nkpts, comp, nimgs, ish, jsh,
buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(expkL_r);
}
/*
 * Scatter the phase-contracted 2-center integral buffers into the complex
 * output.  bufr/bufi hold, for each k point, the batch of i shells
 * (msh0..msh1) times the j shell, in [comp, j, i] order (i fastest); the
 * output is laid out per k point as [comp, i, j].
 */
static void sort2c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dimax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dmjc = dimax * dj * comp;
        double complex *dst = out + jp;
        int kk, ish, ic, i, j;
        size_t off;

        for (kk = 0; kk < nkpts; kk++, dst += nij * comp) {
                off = (size_t)kk * dmjc;
                for (ish = msh0; ish < msh1; ish++) {
                        const int di = ao_loc[ish+1] - ao_loc[ish];
                        const int dij = di * dj;
                        for (ic = 0; ic < comp; ic++) {
                                double complex *pout = dst + nij*ic
                                        + naoj*(ao_loc[ish]-ao_loc[ish0]);
                                const double *pre = bufr + off + dij*ic;
                                const double *pim = bufi + off + dij*ic;
                                for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                        pout[i*naoj+j] = pre[j*di+i]
                                                       + pim[j*di+i]*_Complex_I;
                                } }
                        }
                        off += dij * comp;
                }
        }
}
/*
 * Worker for the lattice-summed 2-center integrals (i|j) at nkpts k
 * points.  For one j shell, accumulate integral blocks over every lattice
 * image jL into bufL, contract the image axis with the Bloch phase tables
 * expkL_r / expkL_i via dgemm, and scatter the resulting per-k-point
 * real/imaginary parts into `out` through sort2c_ks1.
 * ish0 is an extra start offset into the i-shell slice: 0 for the s1
 * fill, jsh for the s2 fill (see PBCnr2c_fill_ks1/ks2 below).
 */
static void _nr2c_fill(int (*intor)(), double complex *out,
int nkpts, int comp, int nimgs, int jsh, int ish0,
double *buf, double *env_loc, double *Ls,
double *expkL_r, double *expkL_i,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const char TRANS_N = 'N';
const double D1 = 1;
const double D0 = 0;
ish0 += shls_slice[0];
jsh += jsh0;
/* offset of the j shell's center in the env coordinate table */
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
/* batch the i shells so one batch fits in an INTBUFMAX10-sized buffer */
int dimax = INTBUFMAX10 / dj;
int ishloc[ish1-ish0+1];
int nishloc = shloc_partition(ishloc, ao_loc, ish0, ish1, dimax);
int i, j, m, msh0, msh1, dmjc, ish, di;
int jL, empty;
int shls[2];
/* buffer layout per batch: [bufk_r | bufk_i | bufL | intor cache] */
double *bufk_r = buf;
double *bufk_i, *bufL, *pbuf, *cache;
shls[1] = jsh;
for (m = 0; m < nishloc; m++) {
msh0 = ishloc[m];
msh1 = ishloc[m+1];
dimax = ao_loc[msh1] - ao_loc[msh0];
dmjc = dj * dimax * comp;
bufk_i = bufk_r + dmjc * nkpts;
bufL = bufk_i + dmjc * nkpts;
cache = bufL + dmjc * nimgs;
pbuf = bufL;
for (jL = 0; jL < nimgs; jL++) {
/* move the j shell's center to lattice image jL */
shift_bas(env_loc, env, Ls, jptrxyz, jL);
for (ish = msh0; ish < msh1; ish++) {
shls[0] = ish;
di = ao_loc[ish+1] - ao_loc[ish];
/* NOTE(review): `empty` is set but never read, and pbuf is not
 * cleared when intor returns 0 — presumably intor zero-fills its
 * output in that case; verify against the integral engine. */
if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
env_loc, cintopt, cache)) {
empty = 0;
}
pbuf += di * dj * comp;
}
}
/* bufk = bufL(dmjc, nimgs) x expkL(nimgs, nkpts), real and imag parts */
dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
&D1, bufL, &dmjc, expkL_r, &nimgs, &D0, bufk_r, &dmjc);
dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
&D1, bufL, &dmjc, expkL_i, &nimgs, &D0, bufk_i, &dmjc);
sort2c_ks1(out, bufk_r, bufk_i, shls_slice, ao_loc,
nkpts, comp, jsh, msh0, msh1);
}
}
/* ('...M,kL->...k', int3c, exp_kL, exp_kL) */
void PBCnr2c_fill_ks1(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        /* s1: no symmetry, the i loop starts at the first shell of the slice */
        const int ish_start = 0;
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, ish_start,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
void PBCnr2c_fill_ks2(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        /* s2: the i loop starts at jsh, producing only the i >= j shells */
        const int ish_start = jsh;
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, ish_start,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
/*
 * OpenMP driver for the lattice-summed 2-center integrals at a set of k
 * points.  Splits expkL into separate real/imaginary tables, then
 * distributes the j shells over threads; each thread calls *fill with a
 * private env copy and integral buffer.
 * NOTE(review): malloc results are unchecked, as in PBCnr3c_drv above.
 */
void PBCnr2c_drv(int (*intor)(), void (*fill)(), double complex *out,
int nkpts, int comp, int nimgs,
double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int njsh = jsh1 - jsh0;
/* split the complex phases into separate real and imaginary tables */
double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
double *expkL_i = expkL_r + nimgs*nkpts;
int i;
for (i = 0; i < nimgs*nkpts; i++) {
expkL_r[i] = creal(expkL[i]);
expkL_i[i] = cimag(expkL[i]);
}
const int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
atm, natm, bas, nbas, env);
/* cache_size and njsh are const-qualified, hence predetermined shared
 * under default(none) (OpenMP data-sharing rules) */
#pragma omp parallel default(none) \
shared(intor, fill, out, nkpts, comp, nimgs, \
Ls, expkL_r, expkL_i, shls_slice, ao_loc, cintopt, \
atm, natm, bas, nbas, env)
{
int jsh;
/* per-thread env copy so shift_bas can mutate coordinates privately */
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv);
memcpy(env_loc, env, sizeof(double)*nenv);
size_t count = nkpts * OF_CMPLX + nimgs;
double *buf = malloc(sizeof(double)*(count*INTBUFMAX10*comp+cache_size));
#pragma omp for schedule(dynamic)
for (jsh = 0; jsh < njsh; jsh++) {
(*fill)(intor, out, nkpts, comp, nimgs, jsh,
buf, env_loc, Ls, expkL_r, expkL_i,
shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(expkL_r);
}
|
DRB109-orderedmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
* Missing the ordered clause
* Data race pair: x@56:5 vs. x@56:5
* */
int main()
{
omprace_init();
int x =0;
/* The loop is declared "ordered" but the body contains no matching
 * "#pragma omp ordered" region, so the x++ updates from different
 * threads are unsynchronized.  This data race is INTENTIONAL: the file
 * is DataRaceBench case DRB109 (missing ordered construct, race = yes);
 * the racy pair is listed in the header comment above.  Do not "fix"
 * the race — it is the behavior under test. */
#pragma omp parallel for ordered
for (int i = 0; i < 100; ++i) {
x++;
}
printf ("x=%d\n",x);
omprace_fini();
return 0;
}
|
PerturbField.c | // Re-write of perturb_field.c for being accessible within the MCMC
int ComputePerturbField(
float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct InitialConditions *boxes, struct PerturbedField *perturbed_field
){
/*
ComputePerturbField uses the first-order Langragian displacement field to move the
masses in the cells of the density field. The high-res density field is extrapolated
to some high-redshift (global_params.INITIAL_REDSHIFT), then uses the zeldovich
approximation to move the grid "particles" onto the lower-res grid we use for the
maps. Then we recalculate the velocity fields on the perturbed grid.
*/
int status;
Try{ // This Try{} wraps the whole function, so we don't indent.
// Makes the parameter structs visible to a variety of functions/macros
// Do each time to avoid Python garbage collection issues
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
omp_set_num_threads(user_params->N_THREADS);
fftwf_complex *HIRES_density_perturb, *HIRES_density_perturb_saved;
fftwf_complex *LOWRES_density_perturb, *LOWRES_density_perturb_saved;
float growth_factor, displacement_factor_2LPT, init_growth_factor, init_displacement_factor_2LPT, xf, yf, zf;
float mass_factor, dDdt, f_pixel_factor, velocity_displacement_factor, velocity_displacement_factor_2LPT;
unsigned long long ct, HII_i, HII_j, HII_k;
int i,j,k, xi, yi, zi, dimension, switch_mid;
double ave_delta, new_ave_delta;
// Function for deciding the dimensions of loops when we could
// use either the low or high resolution grids.
switch(user_params->PERTURB_ON_HIGH_RES) {
case 0:
dimension = user_params->HII_DIM;
switch_mid = HII_MIDDLE;
break;
case 1:
dimension = user_params->DIM;
switch_mid = MIDDLE;
break;
}
// *************** BEGIN INITIALIZATION ************************** //
// perform a very rudimentary check to see if we are underresolved and not using the linear approx
if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){
LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n \
It is recommended that you either increase the resolution (DIM/Box_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n");
}
growth_factor = dicke(redshift);
displacement_factor_2LPT = -(3.0/7.0) * growth_factor*growth_factor; // 2LPT eq. D8
dDdt = ddickedt(redshift); // time derivative of the growth factor (1/s)
init_growth_factor = dicke(global_params.INITIAL_REDSHIFT);
init_displacement_factor_2LPT = -(3.0/7.0) * init_growth_factor*init_growth_factor; // 2LPT eq. D8
// find factor of HII pixel size / deltax pixel size
f_pixel_factor = user_params->DIM/(float)(user_params->HII_DIM);
mass_factor = pow(f_pixel_factor, 3);
// allocate memory for the updated density, and initialize
LOWRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
LOWRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
HIRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
}
double *resampled_box;
// check if the linear evolution flag was set
if (global_params.EVOLVE_DENSITY_LINEARLY){
LOG_DEBUG("Linearly evolve density field");
#pragma omp parallel shared(growth_factor,boxes,LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = growth_factor*boxes->hires_density[R_INDEX(i,j,k)];
}
else {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = growth_factor*boxes->lowres_density[HII_R_INDEX(i,j,k)];
}
}
}
}
}
}
else {
// Apply Zel'dovich/2LPT correction
LOG_DEBUG("Apply Zel'dovich");
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = 0.;
}
else {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = 0.;
}
}
}
}
}
velocity_displacement_factor = (growth_factor-init_growth_factor) / user_params->BOX_LEN;
// now add the missing factor of D
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->hires_vy[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->hires_vz[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
}
else {
boxes->lowres_vx[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->lowres_vy[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->lowres_vz[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
}
}
}
}
}
// * ************************************************************************* * //
// * BEGIN 2LPT PART * //
// * ************************************************************************* * //
// reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
LOG_DEBUG("Apply 2LPT");
// allocate memory for the velocity boxes and read them in
velocity_displacement_factor_2LPT = (displacement_factor_2LPT - init_displacement_factor_2LPT) / user_params->BOX_LEN;
// now add the missing factor in eq. D9
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->hires_vy_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->hires_vz_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
}
else {
boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
}
}
}
}
}
}
// * ************************************************************************* * //
// * END 2LPT PART * //
// * ************************************************************************* * //
// ************ END INITIALIZATION **************************** //
// Perturbing the density field required adding over multiple cells. Store intermediate result as a double to avoid rounding errors
if(user_params->PERTURB_ON_HIGH_RES) {
resampled_box = (double *)calloc(TOT_NUM_PIXELS,sizeof(double));
}
else {
resampled_box = (double *)calloc(HII_TOT_NUM_PIXELS,sizeof(double));
}
// go through the high-res box, mapping the mass onto the low-res (updated) box
LOG_DEBUG("Perturb the density field");
#pragma omp parallel shared(init_growth_factor,boxes,f_pixel_factor,resampled_box,dimension) \
private(i,j,k,xi,xf,yi,yf,zi,zf,HII_i,HII_j,HII_k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->DIM;i++){
for (j=0; j<user_params->DIM;j++){
for (k=0; k<user_params->DIM;k++){
// map indices to locations in units of box size
xf = (i+0.5)/((user_params->DIM)+0.0);
yf = (j+0.5)/((user_params->DIM)+0.0);
zf = (k+0.5)/((user_params->DIM)+0.0);
// update locations
if(user_params->PERTURB_ON_HIGH_RES) {
xf += (boxes->hires_vx)[R_INDEX(i, j, k)];
yf += (boxes->hires_vy)[R_INDEX(i, j, k)];
zf += (boxes->hires_vz)[R_INDEX(i, j, k)];
}
else {
HII_i = (unsigned long long)(i/f_pixel_factor);
HII_j = (unsigned long long)(j/f_pixel_factor);
HII_k = (unsigned long long)(k/f_pixel_factor);
xf += (boxes->lowres_vx)[HII_R_INDEX(HII_i, HII_j, HII_k)];
yf += (boxes->lowres_vy)[HII_R_INDEX(HII_i, HII_j, HII_k)];
zf += (boxes->lowres_vz)[HII_R_INDEX(HII_i, HII_j, HII_k)];
}
// 2LPT PART
// add second order corrections
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
if(user_params->PERTURB_ON_HIGH_RES) {
xf -= (boxes->hires_vx_2LPT)[R_INDEX(i,j,k)];
yf -= (boxes->hires_vy_2LPT)[R_INDEX(i,j,k)];
zf -= (boxes->hires_vz_2LPT)[R_INDEX(i,j,k)];
}
else {
xf -= (boxes->lowres_vx_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
yf -= (boxes->lowres_vy_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
zf -= (boxes->lowres_vz_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
}
}
xf *= (float)(dimension);
yf *= (float)(dimension);
zf *= (float)(dimension);
while (xf >= (float)(dimension)){ xf -= (dimension);}
while (xf < 0){ xf += (dimension);}
while (yf >= (float)(dimension)){ yf -= (dimension);}
while (yf < 0){ yf += (dimension);}
while (zf >= (float)(dimension)){ zf -= (dimension);}
while (zf < 0){ zf += (dimension);}
xi = xf;
yi = yf;
zi = zf;
if (xi >= (dimension)){ xi -= (dimension);}
if (xi < 0) {xi += (dimension);}
if (yi >= (dimension)){ yi -= (dimension);}
if (yi < 0) {yi += (dimension);}
if (zi >= (dimension)){ zi -= (dimension);}
if (zi < 0) {zi += (dimension);}
if(user_params->PERTURB_ON_HIGH_RES) {
#pragma omp atomic
resampled_box[R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
}
else {
#pragma omp atomic
resampled_box[HII_R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
}
}
}
}
}
// Resample back to a float for remaining algorithm
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,resampled_box,dimension) \
private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*( (float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k) ) = (float)resampled_box[R_INDEX(i,j,k)];
}
else {
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) = (float)resampled_box[HII_R_INDEX(i,j,k)];
}
}
}
}
}
free(resampled_box);
LOG_DEBUG("Finished perturbing the density field");
// deallocate
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->hires_vy[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->hires_vz[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
}
else {
boxes->lowres_vx[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->lowres_vy[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->lowres_vz[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
}
}
}
}
}
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->hires_vy_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->hires_vz_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
}
else {
boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
}
}
}
}
}
}
LOG_DEBUG("Cleanup velocities for perturb");
}
// Now, if I still have the high resolution density grid (HIRES_density_perturb) I need to downsample it to the low-resolution grid
if(user_params->PERTURB_ON_HIGH_RES) {
LOG_DEBUG("Downsample the high-res perturbed density");
// Transform to Fourier space to sample (filter) the box
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
// Need to save a copy of the high-resolution unfiltered density field for the velocities
memcpy(HIRES_density_perturb_saved, HIRES_density_perturb, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
// Now filter the box
if (user_params->DIM != user_params->HII_DIM) {
filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
}
// FFT back to real space
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
// Renormalise the FFT'd box
#pragma omp parallel shared(HIRES_density_perturb,LOWRES_density_perturb,f_pixel_factor,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) =
*((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
(unsigned long long)(j*f_pixel_factor+0.5),
(unsigned long long)(k*f_pixel_factor+0.5)))/(float)TOT_NUM_PIXELS;
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) -= 1.;
if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1) {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1.+FRACT_FLOAT_ERR;
}
}
}
}
}
}
else {
if (!global_params.EVOLVE_DENSITY_LINEARLY){
#pragma omp parallel shared(LOWRES_density_perturb,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) /= mass_factor;
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) -= 1.;
}
}
}
}
}
}
// transform to k-space
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
//smooth the field
if (!global_params.EVOLVE_DENSITY_LINEARLY && global_params.SMOOTH_EVOLVED_DENSITY_FIELD){
filter_box(LOWRES_density_perturb, 1, 2, global_params.R_smooth_density*user_params->BOX_LEN/(float)user_params->HII_DIM);
}
// save a copy of the k-space density field
memcpy(LOWRES_density_perturb_saved, LOWRES_density_perturb, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
// normalize after FFT
#pragma omp parallel shared(LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for(i=0; i<user_params->HII_DIM; i++){
for(j=0; j<user_params->HII_DIM; j++){
for(k=0; k<user_params->HII_DIM; k++){
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) /= (float)HII_TOT_NUM_PIXELS;
if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1) // shouldn't happen
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1+FRACT_FLOAT_ERR;
}
}
}
}
#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->density + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
}
}
}
}
// **** Convert to velocities ***** //
LOG_DEBUG("Generate velocity fields");
float k_x, k_y, k_z, k_sq, dDdt_over_D;
int n_x, n_y, n_z;
dDdt_over_D = dDdt/growth_factor;
if(user_params->PERTURB_ON_HIGH_RES) {
// We are going to generate the velocity field on the high-resolution perturbed density grid
memcpy(HIRES_density_perturb, HIRES_density_perturb_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
}
else {
// We are going to generate the velocity field on the low-resolution perturbed density grid
memcpy(LOWRES_density_perturb, LOWRES_density_perturb_saved, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dDdt_over_D,dimension,switch_mid) \
private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (n_x=0; n_x<dimension; n_x++){
if (n_x>switch_mid)
k_x =(n_x-dimension) * DELTA_K; // wrap around for FFT convention
else
k_x = n_x * DELTA_K;
for (n_y=0; n_y<dimension; n_y++){
if (n_y>switch_mid)
k_y =(n_y-dimension) * DELTA_K;
else
k_y = n_y * DELTA_K;
for (n_z=0; n_z<=switch_mid; n_z++){
k_z = n_z * DELTA_K;
k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
// now set the velocities
if ((n_x==0) && (n_y==0) && (n_z==0)) { // DC mode
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb[0] = 0;
}
else {
LOWRES_density_perturb[0] = 0;
}
}
else{
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb[C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(TOT_NUM_PIXELS+0.0);
}
else {
LOWRES_density_perturb[HII_C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(HII_TOT_NUM_PIXELS+0.0);
}
}
}
}
}
}
if(user_params->PERTURB_ON_HIGH_RES) {
// smooth the high resolution field ready for resampling
if (user_params->DIM != user_params->HII_DIM)
filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
#pragma omp parallel shared(perturbed_field,HIRES_density_perturb,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) = *((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5)));
}
}
}
}
}
else {
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
}
}
}
}
}
fftwf_cleanup_threads();
fftwf_cleanup();
fftwf_forget_wisdom();
// deallocate
fftwf_free(LOWRES_density_perturb);
fftwf_free(LOWRES_density_perturb_saved);
if(user_params->PERTURB_ON_HIGH_RES) {
fftwf_free(HIRES_density_perturb);
fftwf_free(HIRES_density_perturb_saved);
}
fftwf_cleanup();
} // End of Try{}
Catch(status){
return(status);
}
return(0);
}
|
GB_binop__first_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_bool)
// A*D function (colscale): GB (_AxD__first_bool)
// D*A function (rowscale): GB (_DxB__first_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__first_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__first_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = aij
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (eWiseAdd) where C, A, and B are all dense.
// For FIRST the result is just cij = aij (see GB_BINOP above), so B's values
// are never read. Returns GrB_NO_VALUE when this kernel is compiled out via
// GB_DISABLE, which tells the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the actual loops live in the shared template, specialized by the
// GB_ATYPE/GB_BINOP/... macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// The subassign-23 template below is excluded (#if 0) for this operator, so
// when enabled this kernel intentionally performs no work and reports success.
GrB_Info GB (_Cdense_accumB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// The subassign-22 template below is excluded (#if 0) for this operator, so
// when enabled this kernel intentionally performs no work and reports success.
GrB_Info GB (_Cdense_accumb__first_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// Cx is C's value array; the loop itself comes from the colscale template,
// specialized by the GB_* macros at the top of this file.
GrB_Info GB (_AxD__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's numeric values, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Cx is C's value array; the loop itself comes from the rowscale template,
// specialized by the GB_* macros at the top of this file.
GrB_Info GB (_DxB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's numeric values, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with cij = first(aij,bij) = aij.
// The mapping arrays (C_to_M/A/B) and TaskList describe how the work was
// sliced across C_ntasks tasks by the symbolic phase.
GrB_Info GB (_AaddB__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspaces used by the add template, freed by GB_FREE_WORK
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse; cij = first(aij,bij) = aij.
GrB_Info GB (_AemultB_08__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 0 for FIRST (defined above), only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; the mask is always needed, so there is no Mask_comp
// parameter here.
GrB_Info GB (_AemultB_04__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__first_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
parallel.c | #include<assert.h>
/*
 * Demonstrates OpenMP data-sharing semantics:
 *   - firstprivate(i): each thread's private copy is initialized from the
 *     value i had before the region (100).
 *   - private(i): each thread's private copy is UNINITIALIZED on entry.
 *     The original code asserted `i != 100` on that uninitialized copy,
 *     which reads an indeterminate value (undefined behavior) and can fail
 *     by chance. Fixed: write the private copy before reading it.
 */
int main(void)
{
    int i = 100;
    int j = 77;
#pragma omp parallel firstprivate(i)
    {
        assert(i == 100); /* firstprivate copies the outer value */
        assert(j == 77);  /* j is shared by default */
    }
#pragma omp parallel private(i)
    {
        i = 0;            /* initialize the private copy before any read */
        assert(i == 0);
    }
    return 0;
}
|
a3.c | #include "omp.h"
/*
 * Y[i] += a * X[i] for i in [0, N), offloaded to the target device.
 *
 * Fixes two defects in the original:
 *  - the `target enter data` mapping of X was never released (device memory
 *    mapping leak) and was never used, because the compute loop was a plain
 *    host `parallel for`;
 *  - Y was never mapped, so a device loop could not see or return it.
 * Without an offloading compiler the pragmas are ignored and the loop runs
 * on the host with identical results.
 */
void axpy(int N, float *Y, float *X, float a) {
  int i;
  /* Copy X to the device for the duration of the computation. */
#pragma omp target enter data map(to:X[0:N])
  /* Run the loop on the device; Y moves to the device and back. */
#pragma omp target teams distribute parallel for map(tofrom:Y[0:N])
  for (i = 0; i < N; ++i)
    Y[i] += a * X[i];
  /* Release the device copy of X created by "enter data" above. */
#pragma omp target exit data map(release:X[0:N])
}
|
DRB003-antidep2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc,char *argv[])
{
int i, j;
int len = 20;
double a[20][20];
// Initialize a: every (i,j) element is written exactly once, so this first
// loop nest is race free.
#pragma omp target data map(from:a[0:20][0:20])
#pragma omp target parallel for private(j)
for (i=0; i< len; i++)
for (j=0; j<len; j++)
a[i][j] = (i * len + j + 0.5);
// INTENTIONAL data race — this is a DataRaceBench "yes" benchmark; the race
// pair a[i][j] (write) vs a[i+1][j] (read) is documented in the file header
// and must NOT be "fixed".
// NOTE(review): the parallel for here is on the inner j loop while i runs
// serially; presumably the race arises via the device data environment
// across target regions — confirm against the benchmark reference.
#pragma omp target data map(tofrom:a[0:20][0:20])
for (i = 0; i < len - 1; i += 1) {
#pragma omp target parallel for
for (j = 0; j < len ; j += 1) {
a[i][j] += a[i + 1][j];
}
}
// Print the whole matrix, then one reference element for quick comparison.
for (i=0; i< len; i++)
for (j=0; j<len; j++)
printf("%lf",a[i][j]);
printf ("a[10][10]=%f\n", a[10][10]);
return 0;
}
|
VerletClusterCells.h | /**
* @file VerletClusterCells.h
* @author jspahl
* @date 25.3.19
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <vector>
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CellBorderAndFlagManager.h"
#include "autopas/containers/ParticleContainer.h"
#include "autopas/containers/cellPairTraversals/CellPairTraversal.h"
#include "autopas/containers/verletClusterLists/VerletClusterCellsParticleIterator.h"
#include "autopas/containers/verletClusterLists/traversals/VerletClusterTraversalInterface.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/iterators/RegionParticleIterator.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/CudaDeviceVector.h"
namespace autopas {
/**
* Particles are divided into clusters.
* The VerletClusterCells class uses neighborhood lists for each cluster pair
* to calculate pairwise interactions.
* It is optimized for a constant, i.e. particle independent, cutoff radius of
* the interaction.
* @tparam Particle
*/
template <class Particle>
class VerletClusterCells : public ParticleContainer<FullParticleCell<Particle>> {
public:
/**
* Constructor of the VerletClusterCells class.
* The neighbor lists are build using an estimated density.
* The box is divided into cuboids with roughly the
* same side length. The rebuildFrequency should be chosen, s.t. the particles do
* not move more than a distance of skin/2 between two rebuilds of the lists.
* @param boxMin the lower corner of the domain
* @param boxMax the upper corner of the domain
* @param cutoff the cutoff radius of the interaction
* @param skin the skin radius
* @param clusterSize size of clusters
*/
VerletClusterCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff,
double skin = 0, int clusterSize = 32)
: ParticleContainer<FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin),
// halo box = domain extended by the interaction length (cutoff + skin) on every side
_boxMinWithHalo(utils::ArrayMath::subScalar(boxMin, cutoff + skin)),
_boxMaxWithHalo(utils::ArrayMath::addScalar(boxMax, cutoff + skin)),
_clusterSize(clusterSize),
// neighbor lists / clustering do not exist yet
_isValid(false) {
// start with a single cell; all particles are appended here until the first rebuild
this->_cells.resize(1);
_dummyStarts = {0};
}
// Identifies this container implementation to the traversal/tuning machinery.
ContainerOption getContainerType() const override { return ContainerOption::verletClusterCells; }
/**
* Function to iterate over all pairs of particles.
* This function only handles short-range interactions.
* @param traversal to be used used
*/
void iteratePairwise(TraversalInterface *traversal) override {
// The traversal must implement both the verlet-list interface (to consume
// our neighbor lists) and the cell-pair interface (to receive the cells).
auto *traversalInterface = dynamic_cast<VerletClusterTraversalInterface<FullParticleCell<Particle>> *>(traversal);
auto *cellPairTraversal = dynamic_cast<CellPairTraversal<FullParticleCell<Particle>> *>(traversal);
if ((!traversalInterface) or (!cellPairTraversal)) {
autopas::utils::ExceptionHandler::exception(
"trying to use a traversal of wrong type in VerletClusterCells::iteratePairwise");
}
traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix);
// Rebuild the neighbor lists if either the traversal changed (different
// signature) or the container was invalidated since the last build.
if (traversalInterface->getSignature() != _lastTraversalSig or (not _isValid)) {
if (!_isValid) {
// re-cluster the particles first
rebuild();
}
traversalInterface->rebuildVerlet(_cellsPerDim, this->_cells, _boundingBoxes,
std::ceil(this->getInteractionLength() * _gridSideLengthReciprocal),
this->getInteractionLength());
_lastTraversalSig = traversalInterface->getSignature();
}
cellPairTraversal->setCellsToTraverse(this->_cells);
// standard traversal life cycle: init, traverse, finalize
traversal->initTraversal();
traversal->traverseParticlePairs();
traversal->endTraversal();
}
/**
* @copydoc VerletLists::addParticle()
*/
void addParticle(const Particle &p) override {
  const bool insideDomain = autopas::utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax());
  if (not insideDomain) {
    utils::ExceptionHandler::exception(
        "VerletCluster: trying to add particle that is not inside the bounding box.\n" + p.toString());
  } else {
    // Adding a particle invalidates the cluster structure.
    _isValid = false;
    // Strip the dummy padding at the end of the first cell ...
    this->_cells[0].resize(_dummyStarts[0]);
    // ... and append the new particle there; it is sorted into its proper
    // cluster on the next rebuild anyway.
    this->_cells[0].addParticle(p);
    ++_dummyStarts[0];
  }
}
/**
* @copydoc VerletLists::addHaloParticle()
*/
void addHaloParticle(const Particle &haloParticle) override {
Particle p_copy = haloParticle;
if (autopas::utils::notInBox(p_copy.getR(), this->getBoxMin(), this->getBoxMax())) {
_isValid = false;
// removes dummy particles in first cell
this->_cells[0].resize(_dummyStarts[0]);
p_copy.setOwned(false);
// add particle somewhere, because lists will be rebuild anyways
this->_cells[0].addParticle(p_copy);
++_dummyStarts[0];
} else {
utils::ExceptionHandler::exception(
"VerletCluster: trying to add halo particle that is inside the bounding box.\n" + haloParticle.toString());
}
}
/**
* Update a halo particle of the container with the given haloParticle.
* @param haloParticle Particle to be updated.
* @return Returns true if the particle was updated, false if no particle could be found.
*/
bool updateHaloParticle(const Particle &haloParticle) override {
Particle pCopy = haloParticle;
pCopy.setOwned(false);
for (auto it = getRegionIterator(utils::ArrayMath::subScalar(pCopy.getR(), this->getSkin() / 2),
utils::ArrayMath::addScalar(pCopy.getR(), this->getSkin() / 2),
IteratorBehavior::haloOnly);
it.isValid(); ++it) {
if (pCopy.getID() == it->getID()) {
*it = pCopy;
return true;
}
}
return false;
}
/**
* Rebuilds the neighbor lists.
* @param traversal The used traversal.
*/
void rebuildNeighborLists(TraversalInterface *traversal) override {
  // The traversal must provide the verlet-list interface of this container.
  auto *traversalInterface = dynamic_cast<VerletClusterTraversalInterface<FullParticleCell<Particle>> *>(traversal);
  if (!traversalInterface) {
    // Fixed: the message previously named iteratePairwise (copy-paste),
    // misleading anyone debugging a failure raised from here.
    autopas::utils::ExceptionHandler::exception(
        "trying to use a traversal of wrong type in VerletClusterCells::rebuildNeighborLists");
  }
  // Re-cluster the particles first if anything invalidated the structure.
  if (not _isValid) {
    rebuild();
  }
  traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix);
  traversalInterface->rebuildVerlet(_cellsPerDim, this->_cells, _boundingBoxes,
                                    std::ceil(this->getInteractionLength() * _gridSideLengthReciprocal),
                                    this->getInteractionLength());
  _lastTraversalSig = traversalInterface->getSignature();
}
/**
* @copydoc VerletLists::deleteHaloParticles
*/
/**
 * Removes all halo (non-owned) particles by turning them into dummies:
 * they are moved far outside the domain in x and swapped past the
 * _dummyStarts boundary of their cell.
 */
void deleteHaloParticles() override {
  _isValid = false;
  for (size_t i = 0; i < this->_cells.size(); ++i) {
    for (size_t j = 0; j < _dummyStarts[i];) {
      if (not this->_cells[i]._particles[j].isOwned()) {
        // Displace the particle along x into the dummy region. Fixed: this
        // previously added _boxMaxWithHalo[2] (the z component) to pos[0],
        // inconsistent with the x-based dummy offset
        // _boxMaxWithHalo[0] + 8 * interactionLength used by the iterators.
        auto pos = this->_cells[i]._particles[j].getR();
        pos[0] += _boxMaxWithHalo[0] + 8 * this->getInteractionLength();
        this->_cells[i]._particles[j].setR(pos);
        // One fewer real particle: shrink the non-dummy range ...
        --_dummyStarts[i];
        // ... and swap the removed particle into the dummy region. Do not
        // advance j: the swapped-in particle must be inspected too.
        std::swap(this->_cells[i]._particles[j], this->_cells[i]._particles[_dummyStarts[i]]);
      } else {
        // owned particle stays; move on
        ++j;
      }
    }
  }
}
/**
* @copydoc VerletLists::updateContainer()
*/
std::vector<Particle> updateContainer() override {
AutoPasLog(debug, "updating container");
deleteHaloParticles();
std::vector<Particle> outsideParticles;
for (auto iter = begin(autopas::IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
if (utils::notInBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) {
outsideParticles.push_back(*iter);
internal::deleteParticle(iter);
}
}
return outsideParticles;
}
// Returns true if the clustering is stale: either it was explicitly
// invalidated, or some particle moved outside the skin-extended bounding box
// of its cluster.
bool isContainerUpdateNeeded() const override {
if (not _isValid) {
return true;
}
for (size_t i = 0; i < this->_cells.size(); ++i) {
// pid walks the particles of cell i in cluster order
size_t pid = 0;
// all clusters except the last are full (_clusterSize particles)
const size_t end = (_boundingBoxes[i].size() > 0) ? _boundingBoxes[i].size() - 1 : 0;
for (size_t cid = 0; cid < end; ++cid) {
for (unsigned int pic = 0; pic < _clusterSize; ++pic) {
if (not particleInSkinOfBox(_boundingBoxes[i][cid], this->_cells[i][pid])) {
return true;
}
++pid;
}
}
// the last cluster may be partially filled; stop at the dummy boundary
for (unsigned int pic = 0; pic < _clusterSize && pid < _dummyStarts[i]; ++pic) {
if (not particleInSkinOfBox(_boundingBoxes[i][_boundingBoxes[i].size() - 1], this->_cells[i][pid])) {
return true;
}
++pid;
}
}
return false;
}
// Describes this container's grid to the traversal selector: 2D cell grid
// dimensions, interaction length, per-cell extents (cells span the full
// domain height in z), and the cluster size.
TraversalSelectorInfo getTraversalSelectorInfo() const override {
return TraversalSelectorInfo(_cellsPerDim, this->getInteractionLength(),
{_gridSideLength, _gridSideLength, this->getBoxMax()[2] - this->getBoxMin()[2]},
_clusterSize);
}
// Mutable iterator over all particles. The x-threshold passed to the
// iterator marks where dummy particles live so they can be skipped.
ParticleIteratorWrapper<Particle, true> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
return ParticleIteratorWrapper<Particle, true>(
new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, true>(
&this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior));
}
// Const iterator over all particles; same dummy-skipping threshold as the
// mutable overload above.
ParticleIteratorWrapper<Particle, false> begin(
IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
return ParticleIteratorWrapper<Particle, false>(
new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, false>(
&this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior));
}
// Mutable iterator over all particles inside [lowerCorner, higherCorner].
// Requires valid (sorted) cells, so it may trigger a rebuild first.
ParticleIteratorWrapper<Particle, true> getRegionIterator(
const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
// Special iterator requires sorted cells
#ifdef AUTOPAS_OPENMP
#pragma omp single
#endif
if (not _isValid) {
rebuild();
}
// there is an implicit barrier at end of single!
// restrict search area to the region where particles are
const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo);
const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo);
// Find cells intersecting the search region; the grid is 2D (x,y), so only
// those two coordinates are converted to cell indices. The region is padded
// by the skin on both sides to catch particles that moved since the rebuild.
size_t xmin = (size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal);
size_t ymin = (size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal);
size_t xlength =
((size_t)((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) +
1;
size_t ylength =
((size_t)((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) +
1;
// enumerate the linear indices of all cells in the xlength x ylength window
std::vector<size_t> cellsOfInterest(xlength * ylength);
auto cellsOfInterestIterator = cellsOfInterest.begin();
int start = xmin + ymin * _cellsPerDim[0];
for (size_t i = 0; i < ylength; ++i) {
std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]);
cellsOfInterestIterator += xlength;
}
return ParticleIteratorWrapper<Particle, true>(
new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, true>(
&this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
_boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin()));
}
/**
 * Returns a read-only iterator over all particles inside the given axis-aligned box.
 * Since this overload is const it cannot rebuild: if the container is valid the
 * fast sorted-cell region iterator is used, otherwise every cell is scanned with
 * the generic region iterator.
 * @param lowerCorner Lower corner of the search region.
 * @param higherCorner Upper corner of the search region.
 * @param behavior Which particles (owned/halo) the iterator should visit.
 * @return Wrapper around a region iterator.
 */
ParticleIteratorWrapper<Particle, false> getRegionIterator(
const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
// restrict search area to the region where particles are
const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo);
const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo);
// Special iterator requires sorted cells.
// Otherwise all cells are traversed with the general Iterator.
if (_isValid) {
// Find cells intersecting the search region
size_t xmin =
(size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal);
size_t ymin =
(size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal);
// NOTE(review): unlike the non-const overload there is no explicit (size_t)
// cast around the scaled upper corner here, so xmin/ymin are subtracted from a
// double before truncation — results can differ at the edges; confirm intent.
size_t xlength =
(((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) + 1;
size_t ylength =
(((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) + 1;
// Collect the linear indices of all intersecting grid cells, row by row.
std::vector<size_t> cellsOfInterest(xlength * ylength);
auto cellsOfInterestIterator = cellsOfInterest.begin();
int start = xmin + ymin * _cellsPerDim[0];
for (size_t i = 0; i < ylength; ++i) {
std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]);
cellsOfInterestIterator += xlength;
}
return ParticleIteratorWrapper<Particle, false>(
new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, false>(
&this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
_boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin()));
} else {
// check all cells
// As dummy particles are outside the domain they are only found if the search region is outside the domain.
std::vector<size_t> cellsOfInterest(this->_cells.size());
std::iota(cellsOfInterest.begin(), cellsOfInterest.end(), 0);
return ParticleIteratorWrapper<Particle, false>(
new internal::RegionParticleIterator<Particle, FullParticleCell<Particle>, false>(
&this->_cells, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest, nullptr, behavior));
}
}
/**
* Get the number of particles excluding dummy Particles saved in the container.
* @return Number of particles in the container.
*/
unsigned long getNumParticles() const override {
size_t numParticles = 0ul;
#ifdef AUTOPAS_OPENMP
// @todo: find a sensible value for magic number
// numThreads should be at least 1 and maximal max_threads
int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads) reduction(+ : numParticles)
#endif
for (size_t index = 0; index < _dummyStarts.size(); ++index) {
numParticles += _dummyStarts[index];
}
return numParticles;
}
/**
 * Removes every particle from the container and invalidates the
 * neighbor-list structure; the per-cell dummy-start bookkeeping is reset.
 */
void deleteAllParticles() override {
_isValid = false;
_dummyStarts.assign(_dummyStarts.size(), 0);
ParticleContainer<FullParticleCell<Particle>>::deleteAllParticles();
}
/**
* Deletes all Dummy Particles in the container
*/
void deleteDummyParticles() {
for (size_t i = 0; i < this->_cells.size(); ++i) {
this->_cells[i].resize(_dummyStarts[i]);
}
_isValid = false;
}
protected:
/**
 * Recalculate grids and clusters, build verlet lists and pad clusters.
 *
 * Steps:
 *  1. drop dummies and bounding boxes,
 *  2. collect all particles and sort them into kept vs. left-the-box,
 *  3. size a 2D (x/y) grid from the estimated particle density,
 *  4. re-insert kept particles, sort each cell by z, pad with dummies to a
 *     multiple of the cluster size,
 *  5. recompute per-cluster bounding boxes.
 *
 * @return Vector of owned particles that are no longer inside the box.
 */
std::vector<Particle> rebuild() {
deleteDummyParticles();
_boundingBoxes.clear();
// get the dimensions and volumes of the box
std::array<double, 3> boxSize{};
double volume = 1.0;
for (int d = 0; d < 3; ++d) {
boxSize[d] = _boxMaxWithHalo[d] - _boxMinWithHalo[d];
volume *= boxSize[d];
}
// get all particles and clear clusters
// "invalidParticles" are the particles that stay in the container; owned
// particles that left the box are returned to the caller instead.
std::vector<Particle> invalidParticles;
std::vector<Particle> outsideParticles;
for (size_t i = 0; i < this->_cells.size(); ++i) {
for (auto &p : this->_cells[i]._particles) {
if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) {
invalidParticles.push_back(p);
} else {
if (p.isOwned()) {
outsideParticles.push_back(p);
} else {
// halo / non-owned particles outside the box are kept
invalidParticles.push_back(p);
}
}
}
this->_cells[i].clear();
}
// estimate particle density (guarded against an empty container)
double density = (std::max(1.0, (double)invalidParticles.size())) / volume;
// guess optimal grid side length so a cell holds about one cluster
_gridSideLength = std::cbrt(((double)_clusterSize) / density);
_gridSideLengthReciprocal = 1 / _gridSideLength;
// get cells per dimension; only x and y are gridded, z is collapsed to 1
size_t sizeGrid = 1;
for (int d = 0; d < 2; d++) {
_cellsPerDim[d] = static_cast<size_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal));
sizeGrid *= _cellsPerDim[d];
}
_cellsPerDim[2] = static_cast<size_t>(1);
// resize to number of grids
this->_cells.resize(sizeGrid);
_dummyStarts.clear();
_dummyStarts.resize(sizeGrid);
_boundingBoxes.resize(sizeGrid);
// put particles into grid cells (linear index = x + y * cellsPerDimX)
for (size_t i = 0; i < invalidParticles.size(); ++i) {
size_t index =
(size_t)((invalidParticles[i].getR()[0] - _boxMinWithHalo[0]) * _gridSideLengthReciprocal) +
(size_t)((invalidParticles[i].getR()[1] - _boxMinWithHalo[1]) * _gridSideLengthReciprocal) * _cellsPerDim[0];
this->_cells[index].addParticle(invalidParticles[i]);
}
// sort by last dimension and add dummy particles so every cell holds a
// multiple of _clusterSize particles; dummies are placed far outside the box
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
for (size_t i = 0; i < sizeGrid; ++i) {
this->_cells[i].sortByDim(2);
const auto numParticles = this->_cells[i].numParticles();
_dummyStarts[i] = numParticles;
unsigned int numDummys = _clusterSize;
if (numParticles > 0) {
numDummys -= (numParticles % (size_t)_clusterSize);
}
Particle dummyParticle = Particle();
for (unsigned int j = 0; j < numDummys; ++j) {
// unique far-away positions per (cell, dummy) avoid identical coordinates
dummyParticle.setR({_boxMaxWithHalo[0] + 8 * this->getInteractionLength() + static_cast<double>(i),
_boxMaxWithHalo[1] + 8 * this->getInteractionLength() + static_cast<double>(j),
_boxMaxWithHalo[2] + 8 * this->getInteractionLength()});
dummyParticle.setID(std::numeric_limits<size_t>::max());
dummyParticle.setOwned(false);
this->_cells[i].addParticle(dummyParticle);
}
}
// make bounding boxes, initialized inverted (min=boxMax, max=boxMin) so the
// first expansion sets them
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
for (size_t i = 0; i < sizeGrid; ++i) {
const size_t nClusters = this->_cells[i].numParticles() / _clusterSize;
_boundingBoxes[i].resize(nClusters, {_boxMaxWithHalo[0], _boxMaxWithHalo[1], _boxMaxWithHalo[2],
_boxMinWithHalo[0], _boxMinWithHalo[1], _boxMinWithHalo[2]});
// NOTE(review): the inner loop runs to _dummyStarts[i] (end of all real
// particles) rather than (cid + 1) * _clusterSize, so each cluster's box
// also absorbs every later real particle in the cell — confirm intended.
for (size_t cid = 0; cid < nClusters; ++cid)
for (size_t pid = cid * _clusterSize; pid < _dummyStarts[i]; ++pid) {
expandBoundingBox(_boundingBoxes[i][cid], this->_cells[i][pid]);
}
}
_isValid = true;
return outsideParticles;
}
private:
/**
 * Grows a bounding box so that it contains the given particle's position.
 * Box layout: [xmin, ymin, zmin, xmax, ymax, zmax].
 * @param box bounding box to expand (modified in place)
 * @param p particle that must end up inside the box
 */
void expandBoundingBox(std::array<double, 6> &box, const Particle &p) {
const auto &pos = p.getR();
for (int dim = 0; dim < 3; ++dim) {
if (pos[dim] < box[dim]) {
box[dim] = pos[dim];
}
if (box[dim + 3] < pos[dim]) {
box[dim + 3] = pos[dim];
}
}
}
/**
 * Checks whether a particle lies inside the bounding box enlarged by the skin.
 * @param box bounding box [xmin, ymin, zmin, xmax, ymax, zmax]
 * @param p particle to test
 * @return true iff the particle is within the skin-extended box in every dimension
 */
bool particleInSkinOfBox(const std::array<double, 6> &box, const Particle &p) const {
const double skin = this->getSkin();
for (int dim = 0; dim < 3; ++dim) {
const double coord = p.getR()[dim];
if (coord < box[dim] - skin or box[dim + 3] + skin < coord) {
return false;
}
}
return true;
}
std::array<double, 3> _boxMinWithHalo, _boxMaxWithHalo;
/// indices where dummy particles in the cells start
std::vector<size_t> _dummyStarts;
// number of particles in a cluster
unsigned int _clusterSize;
// id of neighbor clusters of a clusters in the form [mycell][mycluster] pair(othercell, othercluster)
std::vector<std::vector<std::vector<std::pair<size_t, size_t>>>> _neighborCellIds;
size_t _neighborMatrixDim;
utils::CudaDeviceVector<unsigned int> _neighborMatrix;
// bounding boxes of all clusters (xmin,ymin,zmin,xmax,ymax,zmax)
std::vector<std::vector<std::array<double, 6>>> _boundingBoxes;
// side length of xy-grid and reciprocal
double _gridSideLength;
double _gridSideLengthReciprocal;
// dimensions of grid
std::array<size_t, 3> _cellsPerDim;
// specifies if the neighbor list is currently valid
bool _isValid;
/// Signature of the last Traversal to trigger rebuild when a new one is used
std::tuple<TraversalOption, DataLayoutOption, bool> _lastTraversalSig;
};
} // namespace autopas
|
papi_cntr.h | /**
Copyright (c) 2012, Swiss National Supercomputing Center (CSCS)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the Swiss National Supercomputing Center (CSCS) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PAPI_COUNTER_H
#define PAPI_COUNTER_H
#include <papi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <cstdlib>
#include <cstring>
#include <cassert>
#include <cmath>
#include <vector>
#include <map>
#include <algorithm>
#include <numeric>
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
/**
 * @enum DerivedStatistics
 * @brief Statistics derived from combinations of raw PAPI hardware counters,
 * e.g. FLOP rates, cache miss rates and memory bandwidth. Whether a given
 * statistic can be computed depends on which raw events were requested via
 * the PAPI_EVENTS environment variable.
 */
enum DerivedStatistics{
Derived_FLIPS,
Derived_FLOPS,
Derived_DP_vector_FLOPS,
Derived_SP_vector_FLOPS,
Derived_L1_DMR,
Derived_L2_DMR,
Derived_L1_TMR,
Derived_L2_TMR,
Derived_L3_TMR,
Derived_Mem_Bandwidth,
Derived_BANDWIDTH_SS,
Derived_BANDWIDTH_DS
};
/**
 * Computes the sum of all elements of a vector.
 * @param [in] v - input vector
 * @return sum of the elements; a value-initialized T() for an empty vector
 */
template <typename T>
T VectorSum(std::vector<T> const &v)
{
T total = T();
for (typename std::vector<T>::const_iterator it = v.begin(); it != v.end(); ++it)
{
total += *it;
}
return total;
}
/**
 * Computes the arithmetic mean of the vector elements.
 * @param v input vector
 * @return mean value of the elements; a value-initialized T() for an empty
 *         vector (guards against the division by zero / undefined behavior
 *         the unguarded v.size() division would cause)
 */
template <typename T>
T VectorMean(std::vector<T> const &v)
{
if (v.empty())
{
return T();
}
return std::accumulate(v.begin(), v.end(), T()) / (T)v.size();
}
/**
 * Writes a vector as a Matlab row-vector assignment, e.g. "x = [1 2 3];".
 * Fix: the closing "];" is now always emitted — the previous version attached
 * it to the last element, leaving "name = [" unterminated for an empty vector.
 * @param fid - file to write into
 * @param name - Name of the vector (measured routine)
 * @param v - input vector
 */
template <typename TVec>
void writeVecMatlab(std::ofstream &fid, const std::string & name, TVec const &v)
{
fid << name << " = [";
for(size_t i=0; i<v.size(); i++)
{
// single space between elements, none before the first
if (i > 0)
{
fid << " ";
}
fid << v[i];
}
fid << "];" << std::endl;
}
/**
* @enum PapiFileFormat
* @brief Enumerate the different output formats for counter information \n
* LaTex support not currently implemented
*/
enum PapiFileFormat {FileFormatMatlab, FileFormatPlain, FileFormatLaTeX};
/**
 * @class Papi
 * @brief Singleton that handles PAPI initialisation and access to PAPI calls.
 * Event selection is read from the PAPI_EVENTS environment variable ('|'
 * separated event names); debugging output is enabled via PAPI_DEBUG.
 */
class Papi
{
public:
/// Get papi class instance (created on first use)
static Papi* Instance();
/// destructor
~Papi();
/// Initialise papi (idempotent: repeated calls after a successful setup return immediately)
void Init();
/// Get number of requested PAPI events
inline int GetNumberOfEvents() const
{
return eventNames.size();
};
/// Get the name of the event at the given index
std::string const &GetEventName(const int eventIndex) const
{
assert(eventIndex<GetNumberOfEvents());
return eventNames[eventIndex];
};
/// Get the PAPI event code of the event at the given index
int GetEventNumber(const int eventIndex) const
{
assert(eventIndex<GetNumberOfEvents());
return events[eventIndex];
};
/// Get the last measured counter value for a given thread and event index
long long GetCounter(const int threadIdx, const int counterIndex) const
{
assert(counterIndex<GetNumberOfEvents());
assert(threadIdx<GetNumThreads());
return hwCounterValues[threadIdx][counterIndex];
};
/// Get the last measured elapsed time (seconds) for the given thread
double GetTime(int ThreadIdx) const
{
assert(ThreadIdx<GetNumThreads());
return threadTime[ThreadIdx];
};
/// Start PAPI counters on all threads (error if already counting)
void StartCounters();
/// Stop PAPI counters on all threads and store their values (error if not counting)
void StopCounters();
/// Get number of threads (OpenMP max threads, or 1 without OpenMP)
int GetNumThreads() const
{
return numThreads;
};
/// Are the counters currently running?
bool IsCounting() const
{
return counting;
};
private:
/// Default constructor
Papi() : setup(false), debug(false), counting(false) {};
/// Copy constructor: private and empty — Papi is a singleton and must not be copied
Papi(Papi const &) {};
/// Print a human readable message for a PAPI error code to stderr
void papi_print_error(const int papiErrorCode) const;
// true once Init() has completed
bool setup;
// true when PAPI_DEBUG is set in the environment
bool debug;
// true while counters are running
bool counting;
// PAPI event set handle (created in Init, currently unused elsewhere in this view)
int eventSet;
// number of threads counters are collected for
int numThreads;
// names of the requested events, parallel to 'events'
std::vector<std::string> eventNames;
// PAPI event codes of the requested events
std::vector<int> events;
// per-thread elapsed times (seconds)
std::vector<double> threadTime;
/// actual counter HW counter values, indexed [thread][event]
std::vector<std::vector<long long> > hwCounterValues;
// singleton instance pointer
static Papi* instance;
};
/**
 * @class PapiCounter
 * @brief Accumulates PAPI counter values and wall times for one measured
 * routine (or named code section) over multiple Start()/Stop() invocations.
 * The set of counters mirrors the global Papi singleton's event list.
 */
class PapiCounter
{
public:
/// Constructor: initialises the Papi singleton and allocates per-thread storage
PapiCounter();
/// Start the global counters for this routine
void Start();
/// Stop the global counters and add the measured values to this routine's totals
void Stop();
/// Write this routine's counters to a stream in the requested file format
void WriteToStream(std::string const &routineName,
int eventId,
std::ofstream &stream,
const PapiFileFormat fileFormat);
/// Get the name of counter i
std::string GetName(const int i) const
{
return names[i];
};
/// Get the PAPI event code of counter i
int GetNumber(const int i) const
{
return numbers[i];
};
/// Get the accumulated value of counter i for the given thread
long long GetValue(const int threadIdx, const int i) const
{
assert(threadIdx < GetNumThreads());
return counterValues[threadIdx][i];
};
/// Get the accumulated time (seconds) for the given thread
double GetTime(const int threadIdx) const
{
assert(threadIdx < GetNumThreads());
return times[threadIdx];
};
/// Get the aggregated time: mean over all threads (seconds)
double GetTime() const
{
return VectorMean(times);
};
/// Get number of counters tracked for this routine
int GetNumCounters() const
{
return names.size();
};
/// Get number of threads (delegates to the Papi singleton)
int GetNumThreads() const
{
return Papi::Instance()->GetNumThreads();
};
/// Get counter i summed across all threads
long long GetAggregaterdCounterValuesOverAllThreads(const int i) const;
/// Print counters (and available derived statistics) to stdout
void PrintScreen();
/// Get counter i's per-thread values as a vector
std::vector<long long> GetIndividualValues(const int i) const;
private:
/// Is the derived statistic computable from the requested raw events?
bool IsDerivedStatAvailable(const DerivedStatistics statIdx) const;
/// Compute a derived statistic per thread from the raw counters
std::vector<double> ComputederivedStat(const DerivedStatistics statIdx);
// counter names, parallel to 'numbers'
std::vector<std::string> names;
// PAPI event codes
std::vector<int> numbers;
// accumulated per-thread times (seconds)
std::vector<double> times;
/// counters for a given routine over multiple invocations, indexed [thread][counter]
std::vector<std::vector<long long> > counterValues;
};
/**
 * @class PapiCounterList
 * @brief class to manage all events that we want to benchmark \n
 * essentially a wrapper around map<string, PapiCounter> where the string \n
 * is the routine name or a named code section
 */
class PapiCounterList
{
public:
/// constructor
PapiCounterList() { };
/// write counters of all routines to the named file
void WriteToFile(const std::string fileName, const PapiFileFormat fileFormat = FileFormatPlain);
/// write counters of all routines to an already open stream
void WriteToFile(std::ofstream &fstream, const PapiFileFormat fileFormat = FileFormatPlain);
///print to screen
void PrintScreen();
/// add routine
void AddRoutine(const std::string routineName);
/// Routine
PapiCounter& Routine(const std::string routineName);
/// override [] to allow access to events using ["eventName"].
/// Fix: a single const-reference overload replaces the previous
/// (std::string&) / (std::string) pair, whose calls with a non-const
/// lvalue std::string were ambiguous; this accepts lvalues, rvalues
/// and string literals alike.
PapiCounter& operator[] (const std::string &routineName)
{
return Routine(routineName);
};
private:
/// per-routine counters keyed by routine name
std::map<std::string, PapiCounter> routineEvents;
};
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
//// REPLACEMENT FOR CPP SOURCE FILE
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Maps a DerivedStatistics value to its printable name.
 * @param statIDX derived statistic
 * @return the statistic's name, or an empty string for an unknown value
 */
std::string derivedStatName(DerivedStatistics statIDX){
switch(statIDX){
case Derived_FLIPS:
return "derived_FLIPS";
case Derived_FLOPS:
return "derived_FLOPS";
case Derived_DP_vector_FLOPS:
return "derived_DP_vector_FLOPS";
case Derived_SP_vector_FLOPS:
return "derived_SP_vector_FLOPS";
case Derived_L1_DMR:
return "derived_L1_DMR";
case Derived_L2_DMR:
return "derived_L2_DMR";
case Derived_L1_TMR:
return "derived_L1_TMR";
case Derived_L2_TMR:
return "derived_L2_TMR";
case Derived_L3_TMR:
return "derived_L3_TMR";
case Derived_Mem_Bandwidth:
return "Derived_Mem_Bandwidth";
case Derived_BANDWIDTH_SS:
return "derived_BANDWIDTH_SS";
case Derived_BANDWIDTH_DS:
return "derived_BANDWIDTH_DS";
}
return "";
}
/**
 * Finds the position of a string in a vector of strings.
 * @param strVec vector to search
 * @param str string to look for
 * @return index of the first occurrence, or -1 if str is not in strVec
 */
int findString(std::vector<std::string> const& strVec, std::string str){
for (size_t pos = 0; pos < strVec.size(); ++pos)
{
if (strVec[pos] == str)
{
return (int)pos;
}
}
return -1;
}
//==============================================================================
// PAPI
//==============================================================================
/// initialise Papi instance
Papi* Papi::instance = NULL;
/**
 * Returns the singleton Papi instance, creating it on first use.
 * @return pointer to the Papi instance
 */
Papi* Papi::Instance()
{
if (instance == NULL)
{
instance = new Papi;
}
return instance;
}
/**
 * Initialise papi.
 * Reads PAPI_DEBUG (enables debug output) and PAPI_EVENTS ('|' separated
 * event names) from the environment, initialises the PAPI library (and its
 * OpenMP thread support when compiled with _OPENMP), and builds the list of
 * requested hardware counter events. Idempotent: only the first call does
 * any work. Exits the process on unrecoverable PAPI errors.
 */
void Papi::Init()
{
// only initialise if not already initialised
if (setup)
{
return;
}
int papiError;
// set debugging if requested by environment variable
char *debugStr = getenv("PAPI_DEBUG");
debug = (debugStr != NULL);
if (debug)
{
std::cerr << "Papi debug mode on" << std::endl;
}
// Initialise the papi library
papiError = PAPI_library_init(PAPI_VER_CURRENT);
if (papiError != PAPI_VER_CURRENT)
{
std::cerr << "PAPI library init error!" << std::endl;
exit (1);
}
#ifdef _OPENMP
// assume fixed thread affinity, otherwise this approach fails
papiError = PAPI_thread_init((long unsigned int (*)()) omp_get_thread_num);
if (papiError != PAPI_OK)
{
std::cerr << "Could not initialize the library with openmp."
<< std::endl;
exit (1);
}
numThreads = omp_get_max_threads();
#else
numThreads = 1;
#endif
threadTime.resize(numThreads);
// determine the number of hardware counters
// (zero available counters is also treated as an error here)
int numHWCounters;
papiError = numHWCounters = PAPI_num_counters();
if (papiError <= PAPI_OK)
{
std::cerr << "PAPI error : unable to determine number of hardware counters" << std::endl;
papi_print_error (papiError);
exit (1);
}
if (debug)
{
std::cout << "There are " << numHWCounters
<< " hardware counters available" << std::endl;
}
// get user-defined list of hardware counters from environment variable
char *papiCounters = getenv("PAPI_EVENTS");
if (debug)
{
// fix: guard against streaming a NULL char* (undefined behavior) when
// PAPI_EVENTS is not set
std::cout << "PAPI_EVENTS = "
<< (papiCounters != NULL ? papiCounters : "(not set)") << std::endl;
}
// tokenize the event list; note strtok modifies the string returned by getenv
char *result = NULL;
char delim[] = "|";
if (papiCounters == NULL)
{
result = NULL;
}
else
{
result = strtok(papiCounters, delim);
}
while (result != NULL)
{
int eventID;
papiError = PAPI_event_name_to_code(result, &eventID);
// only accept valid event names, and skip duplicates
if (papiError == PAPI_OK
&& std::find(events.begin(), events.end(), eventID) == events.end())
{
eventNames.push_back(std::string(result));
events.push_back(eventID);
}
else
{
std::cerr << "Papi Error : not adding event : " << result << std::endl;
}
result = strtok(NULL, delim);
}
if (debug)
{
std::cout << "there are " << eventNames.size()
<< " requested counters" << std::endl;
}
// nothing requested: setup is complete, counting will be a no-op
if (GetNumberOfEvents() == 0)
{
setup = true;
return;
}
if (GetNumberOfEvents() > 127)
{
std::cerr << "Too many events selected : exiting" << std::endl;
exit(-1);
}
eventSet = PAPI_NULL;
papiError = PAPI_create_eventset(&eventSet);
if (papiError != PAPI_OK)
{
std::cerr << "Papi error : Could not create the EventSet" << std::endl;
papi_print_error(papiError);
exit(-1);
}
if (debug)
{
for (int i = 0; i < GetNumberOfEvents(); i++)
std::cerr << "Event " << i << " out of " << GetNumberOfEvents()
<< " = " << GetEventName(i) << std::endl;
}
// allocate space for counters
hwCounterValues.resize(numThreads);
for (int i = 0; i < numThreads; i++)
{
hwCounterValues[i].resize(GetNumberOfEvents());
}
setup = true;
}
/**
* Print PAPI error
* @param [in] papiErrorCode
*/
void Papi::papi_print_error(const int papiErrorCode) const
{
char * errString = PAPI_strerror(papiErrorCode);
std::cerr << "PAPI error : " << errString << std::endl;
}
/**
 * Start PAPI counters on every thread and record each thread's start time
 * (stored negated, so that StopCounters can simply add the end time).
 * Initialises PAPI lazily on first use; exits if counters are already running
 * or cannot be started.
 */
void Papi::StartCounters()
{
if (!setup)
{
Init();
}
if (IsCounting())
{
std::cerr << "PAPI counters error : cannot start papi counters when they are already running"
<< std::endl;
exit(-1);
}
#ifdef _OPENMP
#pragma omp parallel
#endif
{
// each thread starts its own counter set
if (GetNumberOfEvents())
{
int papiError = PAPI_start_counters(&events[0], events.size());
if (papiError != PAPI_OK)
{
std::cerr << "PAPI error : unable to start counters" << std::endl;
papi_print_error(papiError);
exit(-1);
}
}
#ifdef _OPENMP
int threadIndex = omp_get_thread_num();
double timeTmp = omp_get_wtime();
#else
int threadIndex = 0;
double timeTmp = PAPI_get_virt_usec() / 1e6;
#endif
// store the negated start time; StopCounters adds the end time
threadTime[threadIndex] = -timeTmp;
}
counting = true;
}
/**
 * Stop PAPI counters on every thread, store the counter values per thread,
 * and complete the per-thread elapsed time (start was stored negated by
 * StartCounters). Exits if the counters were not running or cannot be stopped.
 */
void Papi::StopCounters()
{
if (!IsCounting())
{
std::cerr << "PAPI counters error : cannot stop papi counters when they are have not been started" << std::endl;
exit(-1);
}
#ifdef _OPENMP
#pragma omp parallel
#endif
{
#ifdef _OPENMP
int threadIndex = omp_get_thread_num();
#else
int threadIndex = 0;
#endif
// each thread stops its own counter set and stores the values
if (GetNumberOfEvents())
{
int papiError = PAPI_stop_counters(&hwCounterValues[threadIndex][0], events.size());
if (papiError != PAPI_OK)
{
std::cerr << "PAPI error : unable to stop counters" << std::endl;
papi_print_error(papiError);
exit(-1);
}
}
// adding the end time completes the elapsed time (start was stored negated)
#ifdef _OPENMP
threadTime[threadIndex] += omp_get_wtime();
#else
threadTime[threadIndex] += (PAPI_get_virt_usec() / 1e6);
#endif
}
counting = false;
}
//==============================================================================
// PapiCounter
//==============================================================================
/**
 * Constructor. Initialises the global Papi singleton, mirrors its event
 * names and codes, and allocates zeroed per-thread accumulators for counter
 * values and times.
 */
PapiCounter::PapiCounter()
{
Papi *papi = Papi::Instance();
papi->Init();
const int numCounters = papi->GetNumberOfEvents();
const int numThreads = papi->GetNumThreads();
for (int i = 0; i < numCounters; i++)
{
names.push_back(papi->GetEventName(i));
numbers.push_back(papi->GetEventNumber(i));
}
counterValues.resize(numThreads);
for (int tid = 0; tid < numThreads; tid++)
{
counterValues[tid].resize(numCounters, 0LL);
}
times.resize(numThreads);
}
/**
* Stop counters (accumulate)
*/
void PapiCounter::Stop()
{
Papi::Instance()->StopCounters();
const int numCounters = Papi::Instance()->GetNumberOfEvents();
const int numThreads = Papi::Instance()->GetNumThreads();
for (int tid = 0; tid < numThreads; tid++)
{
for (int i = 0; i < numCounters; i++)
{
counterValues[tid][i] += Papi::Instance()->GetCounter(tid, i);
}
times[tid] += Papi::Instance()->GetTime(tid);
}
}
/**
 * Start the global PAPI counters for this routine; pair with Stop() to
 * accumulate the measured values into this counter set.
 */
void PapiCounter::Start()
{
Papi::Instance()->StartCounters();
}
/**
* Get aggregated values over all threads
* @param i - index of the counter
* @return value for the counter over all threads
*/
long long PapiCounter::GetAggregaterdCounterValuesOverAllThreads(const int i) const
{
assert(i < GetNumCounters());
long long sum = 0LL;
for (int tid = 0; tid < GetNumThreads(); tid++)
{
sum += GetValue(tid, i);
//return sum/(long long)threads();
}
return sum;
}
/**
* Get counter values for each thread
* @param counter index
* @return vector with individual thread values for the counter
*/
std::vector<long long> PapiCounter::GetIndividualValues(const int i) const
{
assert(i < GetNumCounters());
std::vector<long long> tmp;
for (int tid = 0; tid < GetNumThreads(); tid++)
{
tmp.push_back(GetValue(tid, i));
}
return tmp;
}
/**
 * Write this routine's counters (and all derived statistics computable from
 * them) to a stream.
 * @param routineName name of the measured routine / code section
 * @param eventId numeric id written for the Matlab format
 * @param stream output stream
 * @param fileFormat one of FileFormatPlain, FileFormatMatlab, FileFormatLaTeX
 */
void PapiCounter::WriteToStream(std::string const &routineName, int eventId, std::ofstream &stream, PapiFileFormat fileFormat)
{
if (GetNumCounters())
{
int numThreads = Papi::Instance()->GetNumThreads();
switch (fileFormat)
{
// Plain text: per-thread columns (when multithreaded), a TOTAL column,
// raw counters first, then each available derived statistic.
case FileFormatPlain:
stream << "----------------------------" << std::endl;
stream << routineName << " :: wall time " << GetTime() << " s" << std::endl;
stream << "----------------------------" << std::endl;
if (Papi::Instance()->GetNumThreads() > 1)
{
for (int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++)
{
stream << " THREAD" << std::setw(2) << tid;
}
}
stream << " [ TOTAL ]" << std::endl;
// raw counter rows
for (int i = 0; i < GetNumCounters(); i++)
{
if (Papi::Instance()->GetNumThreads() > 1)
{
for (int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++)
{
stream << " " << std::setw(12) << GetValue(tid, i);
}
}
stream << " [ " << std::setw(12) << GetAggregaterdCounterValuesOverAllThreads(i) << " ]"
<< "\t" << GetName(i) << std::endl;
}
/*
for(int i in derivedCounters<platform>::counters){
if( derivedCounters<platform>::counters[i].is_available() )
compute;
write to screen;
}
*/
// rate statistics: printed as value / mean time, in mega-units
if (IsDerivedStatAvailable(Derived_FLIPS))
{
std::vector<double> stat = ComputederivedStat(Derived_FLIPS);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
}
}
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]"
<< "\tderived_FLIPS (MFLIPS)" << std::endl;
}
if (IsDerivedStatAvailable(Derived_FLOPS))
{
std::vector<double> stat = ComputederivedStat(Derived_FLOPS);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
}
}
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]"
<< "\tderived_FLOPS (MFLOPS)" << std::endl;
}
if (IsDerivedStatAvailable(Derived_DP_vector_FLOPS))
{
std::vector<double> stat = ComputederivedStat(Derived_DP_vector_FLOPS);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
}
}
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]"
<< "\tderived_DP_vector_FLOPS (MFLOPS)" << std::endl;
}
if (IsDerivedStatAvailable(Derived_SP_vector_FLOPS))
{
std::vector<double> stat = ComputederivedStat(Derived_SP_vector_FLOPS);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
}
}
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]"
<< "\tderived_SP_vector_FLOPS (MFLOPS)" << std::endl;
}
// ratio statistics (miss rates): printed per thread as percentages;
// no meaningful total, so the TOTAL column shows "-" when multithreaded
if (IsDerivedStatAvailable(Derived_L1_DMR))
{
std::vector<double> stat = ComputederivedStat(Derived_L1_DMR);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
}
stream << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_DMR (%)" << std::endl;
} else {
stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_DMR (%)" << std::endl;
}
}
if (IsDerivedStatAvailable(Derived_L2_DMR))
{
std::vector<double> stat = ComputederivedStat(Derived_L2_DMR);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
}
stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_DMR (%)" << std::endl;
} else {
stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_DMR (%)" << std::endl;
}
}
if (IsDerivedStatAvailable(Derived_L1_TMR))
{
std::vector<double> stat = ComputederivedStat(Derived_L1_TMR);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
}
stream << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_TMR (%)" << std::endl;
} else {
stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_TMR (%)" << std::endl;
}
}
if (IsDerivedStatAvailable(Derived_L2_TMR))
{
std::vector<double> stat = ComputederivedStat(Derived_L2_TMR);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
}
stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_TMR (%)" << std::endl;
} else {
stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_TMR (%)" << std::endl;
}
}
if (IsDerivedStatAvailable(Derived_L3_TMR))
{
std::vector<double> stat = ComputederivedStat(Derived_L3_TMR);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
}
stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L3_TMR (%)" << std::endl;
} else {
stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L3_TMR (%)" << std::endl;
}
}
// NOTE(review): per-thread columns here print the SUM over all threads,
// not stat[tid] — confirm whether this is intended.
if (IsDerivedStatAvailable(Derived_Mem_Bandwidth))
{
std::vector<double> stat = ComputederivedStat(Derived_Mem_Bandwidth);
if (numThreads > 1)
{
for (int tid = 0; tid < numThreads; tid++)
{
stream << std::setw(10) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s";
}
stream<< " [ " << std::setw(10) << "-" << " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl;
} else {
stream << " [ " << std::setw(9) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"<< " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl;
}
}
if (IsDerivedStatAvailable(Derived_BANDWIDTH_SS))
{
std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_SS);
if (numThreads > 1)
for (int tid = 0; tid < numThreads; tid++)
stream << " - ";
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
<< "\tderived_BANDWIDTH_SS (MB/s)" << std::endl;
}
if (IsDerivedStatAvailable(Derived_BANDWIDTH_DS))
{
std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_DS);
if (numThreads > 1)
for (int tid = 0; tid < numThreads; tid++)
stream << " - ";
stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
<< "\tderived_BANDWIDTH_DS (MB/s)" << std::endl;
}
break;
// Matlab: defines <routineName> = eventId and fills event{<routineName>}
// with per-counter names and per-thread value vectors
case FileFormatMatlab:
stream << routineName << " = " << eventId << ";" << std::endl;
for (int i = 0; i < GetNumCounters(); i++)
{
std::stringstream vname;
vname << "event{" <<routineName<< "}.counter(" << i + 1 << ").count";
stream << "event{" <<routineName<< "}.counter(" << i + 1 << ").name = \'"
<< GetName(i) << "\';" << std::endl;
writeVecMatlab(stream, vname.str(), GetIndividualValues(i));
}
break;
// LaTeX: one table section per routine with aggregated counter totals
case FileFormatLaTeX:
stream << "\\hline" << std::endl;
stream << "\\multicolumn{2}{c}{" << routineName << "}" << std::endl;
stream << "\\hline" << std::endl;
stream << "counter & count" << "\\\\" << std::endl;
stream << "\\hline" << std::endl;
for (int i = 0; i < GetNumCounters(); i++)
stream << "\\lst{" << GetName(i) << "}"
<< " & " << GetAggregaterdCounterValuesOverAllThreads(i) << "\\\\" << std::endl;
break;
}
}
}
/**
* Print to screan
*/
void PapiCounter::PrintScreen()
{
	// Per-thread columns (when multi-threaded) plus an aggregate [ TOTAL ] column.
	// Rates are scaled by the mean measured wall time over all threads.
	int numThreads = Papi::Instance()->GetNumThreads();
	if (GetNumCounters() > 0)
	{
		// header row: one label per thread, then the total column
		if (numThreads > 1)
		{
			for (int tid = 0; tid < numThreads; tid++)
			{
				std::cout << " THREAD" << std::setw(2) << tid;
			}
		}
		std::cout << " [ TOTAL ]" << std::endl;
		// raw counter values, one row per collected PAPI event
		for (int i = 0; i < GetNumCounters(); i++)
		{
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << " " << std::setw(12) << GetValue(tid, i);
				}
			}
			std::cout << " [ " << std::setw(12) << GetAggregaterdCounterValuesOverAllThreads(i) << " ]"
				<< "\t" << GetName(i) << std::endl;
		}
		// derived rates: per-thread value / mean time, totals = sum / mean time
		if (IsDerivedStatAvailable(Derived_FLIPS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_FLIPS);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
				}
			}
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_FLIPS (MFLIPS)" << std::endl;
		}
		if (IsDerivedStatAvailable(Derived_FLOPS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_FLOPS);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
				}
			}
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_FLOPS (MFLOPS)" << std::endl;
		}
		if (IsDerivedStatAvailable(Derived_DP_vector_FLOPS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_DP_vector_FLOPS);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
				}
			}
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_DP_vector_FLOPS (MFLOPS)" << std::endl;
		}
		if (IsDerivedStatAvailable(Derived_SP_vector_FLOPS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_SP_vector_FLOPS);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6);
				}
			}
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_SP_vector_FLOPS (MFLOPS)" << std::endl;
		}
		// miss ratios: per-thread percentages; no meaningful total, so "-" is shown
		if (IsDerivedStatAvailable(Derived_L1_DMR))
		{
			std::vector<double> stat = ComputederivedStat(Derived_L1_DMR);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
				}
				std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_DMR (%)" << std::endl;
			} else {
				std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_DMR (%)" << std::endl;
			}
		}
		if (IsDerivedStatAvailable(Derived_L2_DMR))
		{
			std::vector<double> stat = ComputederivedStat(Derived_L2_DMR);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
				}
				std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_DMR (%)" << std::endl;
			} else {
				std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_DMR (%)" << std::endl;
			}
		}
		if (IsDerivedStatAvailable(Derived_L1_TMR))
		{
			std::vector<double> stat = ComputederivedStat(Derived_L1_TMR);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
				}
				std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_TMR (%)" << std::endl;
			} else {
				std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_TMR (%)" << std::endl;
			}
		}
		if (IsDerivedStatAvailable(Derived_L2_TMR))
		{
			std::vector<double> stat = ComputederivedStat(Derived_L2_TMR);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
				}
				std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_TMR (%)" << std::endl;
			} else {
				std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_TMR (%)" << std::endl;
			}
		}
		if (IsDerivedStatAvailable(Derived_L3_TMR))
		{
			std::vector<double> stat = ComputederivedStat(Derived_L3_TMR);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%";
				}
				std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L3_TMR (%)" << std::endl;
			} else {
				std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L3_TMR (%)" << std::endl;
			}
		}
		// bandwidth estimate: L3 misses * 64-byte cache line / time
		if (IsDerivedStatAvailable(Derived_Mem_Bandwidth))
		{
			std::vector<double> stat = ComputederivedStat(Derived_Mem_Bandwidth);
			if (numThreads > 1)
			{
				for (int tid = 0; tid < numThreads; tid++)
				{
					// BUGFIX: show each thread's own bandwidth; the original printed
					// VectorSum(stat) (the all-thread total) in every thread column.
					std::cout << std::setw(10) << std::setprecision(3)<< stat[tid] * 64 / VectorMean(times) / (1024*1024)<< "MB/s";
				}
				std::cout<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl;
			} else {
				std::cout << " [ " << std::setw(9) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"<< " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl;
			}
		}
		// AMD native-event bandwidth stats: only the total is reported per thread
		if (IsDerivedStatAvailable(Derived_BANDWIDTH_SS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_SS);
			if (numThreads > 1)
				for (int tid = 0; tid < numThreads; tid++)
					std::cout << " - ";
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_BANDWIDTH_SS (MB/s)" << std::endl;
		}
		if (IsDerivedStatAvailable(Derived_BANDWIDTH_DS))
		{
			std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_DS);
			if (numThreads > 1)
				for (int tid = 0; tid < numThreads; tid++)
					std::cout << " - ";
			std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]"
				<< "\tderived_BANDWIDTH_DS (MB/s)" << std::endl;
		}
	}
	else
	{
		std::cout << "PAPI-WRAP :: no counters to print" << std::endl;
	}
}
/**
* Are derived statistics available
* @param statIdx
* @return
*/
bool PapiCounter::IsDerivedStatAvailable(const DerivedStatistics statIdx) const
{
	// A derived statistic can be reported only when every PAPI event it is
	// computed from was actually collected (i.e. appears in "names").
	switch (statIdx)
	{
	case Derived_FLIPS:
		return findString(names, std::string("PAPI_FP_INS")) >= 0;
	case Derived_FLOPS:
		return findString(names, std::string("PAPI_FP_OPS")) >= 0;
	case Derived_DP_vector_FLOPS:
		return findString(names, std::string("PAPI_DP_OPS")) >= 0;
	case Derived_SP_vector_FLOPS:
		return findString(names, std::string("PAPI_SP_OPS")) >= 0;
	case Derived_L1_DMR:
		// needs the miss count plus either total accesses or loads + stores
		if (findString(names, std::string("PAPI_L1_DCM")) < 0)
			return false;
		if (findString(names, std::string("PAPI_L1_DCA")) >= 0)
			return true;
		return findString(names, std::string("PAPI_LD_INS")) >= 0
			&& findString(names, std::string("PAPI_SR_INS")) >= 0;
	case Derived_L2_DMR:
		return findString(names, std::string("PAPI_L2_DCA")) >= 0
			&& findString(names, std::string("PAPI_L2_DCM")) >= 0;
	case Derived_L1_TMR:
		return findString(names, std::string("PAPI_L1_TCA")) >= 0
			&& findString(names, std::string("PAPI_L1_TCM")) >= 0;
	case Derived_L2_TMR:
		return findString(names, std::string("PAPI_L2_TCA")) >= 0
			&& findString(names, std::string("PAPI_L2_TCM")) >= 0;
	case Derived_L3_TMR:
		return findString(names, std::string("PAPI_L3_TCA")) >= 0
			&& findString(names, std::string("PAPI_L3_TCM")) >= 0;
	case Derived_Mem_Bandwidth:
		return findString(names, std::string("PAPI_L3_TCM")) >= 0;
	case Derived_BANDWIDTH_SS:
	case Derived_BANDWIDTH_DS:
		// both AMD bandwidth variants rely on the same two native events
		return findString(names, std::string("SYSTEM_READ_RESPONSES:0x07")) >= 0
			&& findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01")) >= 0;
	}
	return false;
}
/**
* Compute derived statistics
* @param statIdx
* @return
*/
std::vector<double> PapiCounter::ComputederivedStat(DerivedStatistics statIdx)
{
	// Returns one value per thread. Raw-count statistics (FLIPS/FLOPS/bandwidth)
	// are returned unscaled; the printing routines divide by wall time.
	std::vector<double> derived(GetNumThreads());
	int idx, idxCM, idxCA;
	int idxSRS, idxOWT;
	switch (statIdx)
	{
	case Derived_FLIPS:
		// total FP instructions per thread; caller divides by time for MFLIPS
		idx = findString(names, std::string("PAPI_FP_INS"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = GetValue(tid, idx);
		}
		return derived;
	case Derived_FLOPS:
		// total FP operations per thread; caller divides by time for MFLOPS
		idx = findString(names, std::string("PAPI_FP_OPS"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = GetValue(tid, idx);
		}
		return derived;
	case Derived_DP_vector_FLOPS:
		// total double-precision vector operations per thread
		idx = findString(names, std::string("PAPI_DP_OPS"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = GetValue(tid, idx);
		}
		return derived;
	case Derived_SP_vector_FLOPS:
		// total single-precision vector operations per thread
		idx = findString(names, std::string("PAPI_SP_OPS"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = GetValue(tid, idx);
		}
		return derived;
	case Derived_L1_DMR:
	{
		// L1 data-cache miss ratio = misses / accesses
		idxCM = findString(names, std::string("PAPI_L1_DCM"));
		// BUGFIX: prefer total L1 data-cache accesses (PAPI_L1_DCA), matching
		// IsDerivedStatAvailable(). The original tested PAPI_LD_INS here, which
		// ignored stores when loads were counted and indexed the counter table
		// with -1 when only PAPI_L1_DCA had been collected.
		idxCA = findString(names, std::string("PAPI_L1_DCA"));
		// fallback events, hoisted out of the per-thread loop
		int idxLD = findString(names, std::string("PAPI_LD_INS"));
		int idxSR = findString(names, std::string("PAPI_SR_INS"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			double access = 0.0;
			if (idxCA >= 0) {
				access = (double) GetValue(tid, idxCA);
			} else {
				// no access counter: approximate accesses by loads + stores
				access = (double) GetValue(tid, idxLD) + (double) GetValue(tid, idxSR);
			}
			derived[tid] = (double) GetValue(tid, idxCM) / access;
		}
		return derived;
	}
	case Derived_L2_DMR:
		// L2 data-cache miss ratio = misses / accesses
		idxCM = findString(names, std::string("PAPI_L2_DCM"));
		idxCA = findString(names, std::string("PAPI_L2_DCA"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA);
		}
		return derived;
	case Derived_L1_TMR:
		// L1 total-cache miss ratio
		idxCM = findString(names, std::string("PAPI_L1_TCM"));
		idxCA = findString(names, std::string("PAPI_L1_TCA"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA);
		}
		return derived;
	case Derived_L2_TMR:
		// L2 total-cache miss ratio
		idxCM = findString(names, std::string("PAPI_L2_TCM"));
		idxCA = findString(names, std::string("PAPI_L2_TCA"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA);
		}
		return derived;
	case Derived_L3_TMR:
		// L3 total-cache miss ratio
		idxCM = findString(names, std::string("PAPI_L3_TCM"));
		idxCA = findString(names, std::string("PAPI_L3_TCA"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA);
		}
		return derived;
	case Derived_Mem_Bandwidth:
		// L3 total cache misses per thread; each miss transfers one cache line,
		// so the caller multiplies by 64 bytes and divides by time
		idx = findString(names, std::string("PAPI_L3_TCM"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = GetValue(tid, idx);
		}
		return derived;
	// see page 2101 of Shirley Moore et al., Procedia Computer Science 4 (2011)
	// this needs some tweaking for Interlagos (15h):
	case Derived_BANDWIDTH_SS:
		// single stream: bytes moved = 32 B per read response + 8 B per octword write
		idxSRS = findString(names, std::string("SYSTEM_READ_RESPONSES:0x07"));
		idxOWT = findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01"));
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxSRS)*32. + (double) GetValue(tid, idxOWT)*8.;
		}
		return derived;
	case Derived_BANDWIDTH_DS:
		idxSRS = findString(names, std::string("SYSTEM_READ_RESPONSES:0x07"));
		idxOWT = findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01"));
		// dual stream: accumulate bandwidth of one core per Bulldozer module
		for (int tid = 0; tid < GetNumThreads(); tid++)
		{
			derived[tid] = (double) GetValue(tid, idxSRS)*32. + (double) GetValue(tid, idxOWT)*4.;
		}
		return derived;
	}
	// this will never occur - just to keep the compiler warnings off
	return std::vector<double>(0);
}
/*================================================
PapiCounterList
================================================*/
/**
 * Add routine to the papi counter
 * @param routineName
 */
void PapiCounterList::AddRoutine(const std::string routineName)
{
	// a routine may only be registered once
	assert(routineEvents.count(routineName) == 0);
	routineEvents[routineName] = PapiCounter();
}
/**
 * Get counter for the routine
 * @param routineName
 * @return the counter registered for the given routine
 */
PapiCounter& PapiCounterList::Routine(std::string routineName)
{
	// the routine must have been registered via AddRoutine() beforehand
	assert(routineEvents.count(routineName) == 1);
	return routineEvents[routineName];
}
/**
 * Write to stream
 * @param fileName
 * @param fileFormat
 */
void PapiCounterList::WriteToFile(const std::string fileName, PapiFileFormat fileFormat)
{
std::ofstream fid;
fid.open(fileName.c_str());
switch (fileFormat)
{
case FileFormatMatlab:
break;
case FileFormatPlain:
break;
case FileFormatLaTeX:
fid << "\\begin{tabular}{lr}" << std::endl;
break;
}
int id = 1;
for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin();
it != routineEvents.end();
it++)
{
it->second.WriteToStream(it->first, id, fid, fileFormat);
id++;
}
switch (fileFormat)
{
case FileFormatMatlab:
break;
case FileFormatPlain:
break;
case FileFormatLaTeX:
fid << "\\hline" << std::endl;
fid << "\\end{tabular}" << std::endl;
break;
}
fid.close();
}
/**
 * Write to an already-open stream
 * @param fstream output stream (closed on return)
 * @param fileFormat
 */
void PapiCounterList::WriteToFile(std::ofstream &fstream, PapiFileFormat fileFormat)
{
switch (fileFormat)
{
case FileFormatMatlab:
break;
case FileFormatPlain:
break;
case FileFormatLaTeX:
fstream << "\\begin{tabular}{lr}" << std::endl;
break;
}
int id = 1;
for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin();
it != routineEvents.end();
it++)
{
it->second.WriteToStream(it->first, id++, fstream, fileFormat);
}
switch (fileFormat)
{
case FileFormatMatlab:
break;
case FileFormatPlain:
break;
case FileFormatLaTeX:
fstream << "\\hline" << std::endl;
fstream << "\\end{tabular}" << std::endl;
break;
}
// close the file stream
fstream.close();
}
/**
* Print to screen
*/
void PapiCounterList::PrintScreen()
{
for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin();
it != routineEvents.end();
it++)
{
std::cout << "--------------------------------" << std::endl;
std::cout << it->first << " :: wall time " << it->second.GetTime() << " s" << std::endl;
std::cout << "--------------------------------" << std::endl;
it->second.PrintScreen();
}
}
#endif
|
point_point_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: clabra $
// Date: $Date: 2007-03-29 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_POINT_POINT_SEARCH_H_INCLUDED)
#define KRATOS_POINT_POINT_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "utilities/openmp_utils.h"
// Configures
#include "spatial_containers/spatial_search.h"
#include "point_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
// External includes
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
/// Spatial search between two point (node) sets using dynamic bins.
class PointPointSearch: public SpatialSearch
{
public:
    ///@name Type Definitions
    ///@{
    /// Pointer definition of PointPointSearch
    KRATOS_CLASS_POINTER_DEFINITION(PointPointSearch);
    typedef PointType* PointPointerType;
    typedef std::vector<PointPointerType>* PointVector;
    typedef std::vector<PointPointerType>::iterator PointIterator;
    typedef double* DistanceVector;
    typedef double* DistanceIterator;
    //Configure Types
    typedef PointConfigure<3> PointConfigureType;   // 3-D point configure used by the bins
    //Bin Types
    typedef BinsObjectDynamic<PointConfigureType> PointBinsType;
    typedef PointerVectorSet<Point, IndexedObject> PointSetType;
    ///@}
    ///@name Life Cycle
    ///@{
    /// Default constructor.
    PointPointSearch(){}
    /// Destructor.
    ~PointPointSearch(){}
    // For each node in r_nodes, find all nodes of r_nodes_to_find lying within
    // radius[i] of it (the node itself is excluded: exclusive search).
    // Results and squared-free distances are written to r_results[i] /
    // r_results_distances[i].
    // NOTE(review): assumes r_results and r_results_distances are already sized
    // to r_nodes.size(), and radius is indexed like r_nodes -- confirm with callers.
    void SearchPointsImplementation(
        NodesContainerType const& r_nodes,
        NodesContainerType const& r_nodes_to_find,
        RadiusArrayType const& radius,
        VectorResultNodesContainerType& r_results,
        VectorDistanceType& r_results_distances)
    {
        KRATOS_TRY
        // upper bound on neighbours of any node: every searched-for node
        int max_n_of_neigh_nodes = r_nodes_to_find.size();
        NodesContainerType::ContainerType& nodes = const_cast <NodesContainerType::ContainerType&> (r_nodes.GetContainer());
        NodesContainerType::ContainerType& nodes_to_find = const_cast <NodesContainerType::ContainerType&> (r_nodes_to_find.GetContainer());
        PointSetType::ContainerType nodes_temp;
        PointSetType::ContainerType nodes_to_find_temp;
        // maps each temporary Point back to the Node it was built from, so bin
        // results (Points) can be translated into Node pointers
        std::map<Point::Pointer, Node<3>::Pointer> map_point_to_node;
        // copy node coordinates into plain Points (the bins work on Points)
        nodes_temp.reserve(nodes.size());
        for (NodesContainerType::ContainerType::iterator it = nodes.begin(); it != nodes.end(); ++it){
            auto p_point = std::make_shared<Point>((*it)->Coordinates());
            nodes_temp.push_back(p_point);
        }
        nodes_to_find_temp.reserve(nodes_to_find.size());
        for (auto it = nodes_to_find.begin(); it != nodes_to_find.end(); ++it){
            auto p_point = std::make_shared<Point>((*it)->Coordinates());
            nodes_to_find_temp.push_back(p_point);
            map_point_to_node[p_point] = *it; //*(it.base());
        }
        // build the bins over the set being searched FOR
        PointBinsType bins(nodes_to_find_temp.begin(), nodes_to_find_temp.end());
        #pragma omp parallel
        {
            // per-thread scratch buffers sized for the worst case
            PointSetType::ContainerType local_results(max_n_of_neigh_nodes);
            DistanceType local_results_distances(max_n_of_neigh_nodes);
            std::size_t n_of_results = 0;
            #pragma omp for
            for (int i = 0; i < static_cast<int>(nodes.size()); ++i){
                PointSetType::ContainerType::iterator i_results_begin = local_results.begin();
                DistanceType::iterator i_distances_results_begin = local_results_distances.begin();
                // exclusive search: the query point itself is not returned
                n_of_results = bins.SearchObjectsInRadiusExclusive(nodes_temp[i], radius[i], i_results_begin, i_distances_results_begin, max_n_of_neigh_nodes);
                r_results[i].reserve(n_of_results);
                // translate Point results back into the original Node pointers
                // (read-only lookups of existing keys; the map is not modified here)
                for (PointSetType::ContainerType::iterator it = local_results.begin(); it != local_results.begin() + n_of_results; ++it){
                    r_results[i].push_back(map_point_to_node[ *it/**(it.base())*/ ]);
                }
                r_results_distances[i].insert(r_results_distances[i].begin(), local_results_distances.begin(), local_results_distances.begin() + n_of_results);
            }
        }
        KRATOS_CATCH("")
    }
    /// Turn back information as a string.
    virtual std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "PointPointSearch" ;
        return buffer.str();
    }
    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const override {rOStream << "PointPointSearch";}
    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const override {}
    ///@}
    ///@name Friends
    ///@{
    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{
    ///@}
    ///@name Protected member Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{
    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{
    /// Assignment operator.
    PointPointSearch& operator=(PointPointSearch const& rOther)
    {
        return *this;
    }
    /// Copy constructor.
    PointPointSearch(PointPointSearch const& rOther)
    {
        *this = rOther;
    }
}; // Class PointPointSearch
} // namespace Kratos.
#endif // KRATOS_POINT_POINT_SEARCH_H_INCLUDED defined
|
serial_teams.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc, icc-19
#include "callback.h"
int main() {
// Two teams with at most one thread each; the nested parallel region also
// requests a single thread, so every construct below runs serialized and the
// OMPT callbacks fire in the order the CHECK lines expect.
#pragma omp target teams num_teams(2) thread_limit(1)
#pragma omp parallel num_threads(1)
  { printf("In teams parallel\n"); }
  return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER_0:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=2
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]
//
// team 0
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=2, index=0
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
//
// team 1
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_1:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_1:[0-9]+]], actual_parallelism=2, index=1
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[IMPL_TASK_1:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11]], task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[INIT_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_1]], actual_parallelism=0, index=1
|
core32.c | #undef DT32
#define DT32 //<- This should be the ONLY difference between core32 and core64!
#ifdef DT32
#define flt float
#define DT_CALC DT_FLOAT32
#define epsilon FLT_EPSILON
#else
#define flt double
#define DT_CALC DT_FLOAT64
#define epsilon DBL_EPSILON
#endif
#define SIMD
#ifdef SIMD //explicitly vectorize (SSE,AVX,Neon)
#ifdef __x86_64__
#include <immintrin.h>
#ifdef DT32
#define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction
#else
#define kSSE64 2 //128-bit SSE handles 2 64-bit floats per instruction
#endif
//#define myUseAVX
//#define kAVX32 8 //256-bit AVX handles 8 32-bit floats per instruction
//#define kAVX64 4 //256-bit AVX handles 4 64-bit floats per instruction
#else
#ifdef DT32
#include "sse2neon.h"
#define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction
#else
#undef SIMD
#endif
//#undef myUseAVX
#endif
#endif
#include <float.h> //FLT_EPSILON
#include <nifti2_io.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef __aarch64__
#include "arm_malloc.h"
#else
#include <immintrin.h>
#endif
#include <limits.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "core.h"
#define bandpass
#ifdef bandpass
#include "bw.h"
#endif
//#define slicetimer //tensor_decomp support is optional
#ifdef slicetimer
#include "afni.h"
#endif
#define tensor_decomp //tensor_decomp support is optional
#ifdef tensor_decomp
#include "tensor.h"
#endif
//#define TFCE //formerly we used Christian Gaser's tfce, new bespoke code handles connectivity
//#ifdef TFCE //we now use in-built tfce function
// #include "tfce_pthread.h"
//#endif
#ifdef SIMD
#ifdef DT32
static void nifti_sqrt(flt *v, size_t n) {
	//In-place square root of the n values in v: SSE 4-wide body plus scalar tail.
	flt *vin = v;
	//BUGFIX: n is size_t, so "n - kSSE32" wraps to a huge value when n < kSSE32,
	//making the original loop read/write far out of bounds; guard the SIMD body.
	if (n >= kSSE32) {
		//#pragma omp parallel for
		for (size_t i = 0; i <= (n - kSSE32); i += kSSE32) {
			__m128 v4 = _mm_loadu_ps(vin);
			__m128 ma = _mm_sqrt_ps(v4);
			_mm_storeu_ps(vin, ma);
			vin += kSSE32;
		}
	}
	//scalar tail (also handles the whole array when n < kSSE32)
	int tail = (n % kSSE32);
	while (tail > 0) {
		v[n - tail] = sqrt(v[n - tail]);
		tail--;
	}
} // nifti_sqrt()
static void nifti_mul(flt *v, size_t n, flt slope1) {
	//In-place scale: v[i] *= slope1 (SSE 4-wide body plus scalar tail).
	flt *vin = v;
	__m128 slope = _mm_set1_ps(slope1);
	//BUGFIX: n is size_t, so "n - kSSE32" wraps for n < kSSE32 and the original
	//loop ran out of bounds; guard the SIMD body.
	if (n >= kSSE32) {
		//#pragma omp parallel for
		for (size_t i = 0; i <= (n - kSSE32); i += kSSE32) {
			__m128 v4 = _mm_loadu_ps(vin);
			__m128 m = _mm_mul_ps(v4, slope);
			_mm_storeu_ps(vin, m);
			vin += kSSE32;
		}
	}
	//scalar tail (also handles the whole array when n < kSSE32)
	int tail = (n % kSSE32);
	while (tail > 0) {
		v[n - tail] *= slope1;
		tail--;
	}
} //nifti_mul()
static void nifti_add(flt *v, int64_t n, flt intercept1) {
	//In-place offset: v[i] += intercept1 (SSE 4-wide body plus scalar tail).
	if (intercept1 == 0.0f)
		return; //adding zero is a no-op
	__m128 bias = _mm_set1_ps(intercept1);
	flt *p = v;
	int64_t i = 0;
	for (; i <= (n - kSSE32); i += kSSE32) {
		_mm_storeu_ps(p, _mm_add_ps(_mm_loadu_ps(p), bias));
		p += kSSE32;
	}
	//scalar tail: remaining n % kSSE32 elements
	for (; i < n; i++)
		v[i] = v[i] + intercept1;
} //nifti_add()
static void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) {
	//In-place multiply-add: v[i] = v[i] * slope1 + intercept1.
	if ((slope1 == 1.0f) && (intercept1 == 0.0f))
		return; //identity transform: nothing to do
	__m128 s = _mm_set1_ps(slope1);
	__m128 b = _mm_set1_ps(intercept1);
	flt *p = v;
	int64_t i = 0;
	for (; i <= (n - kSSE32); i += kSSE32) {
		__m128 x = _mm_loadu_ps(p);
		_mm_storeu_ps(p, _mm_add_ps(_mm_mul_ps(x, s), b));
		p += kSSE32;
	}
	//scalar tail: remaining n % kSSE32 elements
	for (; i < n; i++)
		v[i] = (v[i] * slope1) + intercept1;
} //nifti_fma()
#else //if SIMD32 else SIMD64
static void nifti_sqrt(flt *v, size_t n) {
	//In-place square root of the n values in v: SSE2 2-wide body plus scalar tail.
	flt *vin = v;
	//BUGFIX: n is size_t, so "n - kSSE64" wraps to a huge value when n < kSSE64,
	//making the original loop read/write far out of bounds; guard the SIMD body.
	if (n >= kSSE64) {
		//#pragma omp parallel for
		for (size_t i = 0; i <= (n - kSSE64); i += kSSE64) {
			__m128d v2 = _mm_loadu_pd(vin);
			__m128d ma = _mm_sqrt_pd(v2);
			_mm_storeu_pd(vin, ma);
			vin += kSSE64;
		}
	}
	//scalar tail (also handles the whole array when n < kSSE64)
	int tail = (n % kSSE64);
	while (tail > 0) {
		v[n - tail] = sqrt(v[n - tail]);
		tail--;
	}
} // nifti_sqrt()
static void nifti_mul(flt *v, size_t n, flt slope1) {
	//In-place scale: v[i] *= slope1 (SSE2 2-wide body plus scalar tail).
	flt *vin = v;
	__m128d slope = _mm_set1_pd(slope1);
	//BUGFIX: n is size_t, so "n - kSSE64" wraps for n < kSSE64 and the original
	//loop ran out of bounds; guard the SIMD body.
	if (n >= kSSE64) {
		//#pragma omp parallel for
		for (size_t i = 0; i <= (n - kSSE64); i += kSSE64) {
			__m128d v2 = _mm_loadu_pd(vin);
			__m128d m = _mm_mul_pd(v2, slope);
			_mm_storeu_pd(vin, m);
			vin += kSSE64;
		}
	}
	//scalar tail (also handles the whole array when n < kSSE64)
	int tail = (n % kSSE64);
	while (tail > 0) {
		v[n - tail] *= slope1;
		tail--;
	}
} //nifti_mul()
static void nifti_add(flt *v, int64_t n, flt intercept1) {
	//In-place offset: v[i] += intercept1 (SSE2 2-wide body plus scalar tail).
	if (intercept1 == 0.0f)
		return; //adding zero is a no-op
	__m128d bias = _mm_set1_pd(intercept1);
	flt *p = v;
	int64_t i = 0;
	for (; i <= (n - kSSE64); i += kSSE64) {
		_mm_storeu_pd(p, _mm_add_pd(_mm_loadu_pd(p), bias));
		p += kSSE64;
	}
	//scalar tail: remaining n % kSSE64 elements
	for (; i < n; i++)
		v[i] = v[i] + intercept1;
} //nifti_add()
static void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) {
	//In-place multiply-add: v[i] = v[i] * slope1 + intercept1.
	if ((slope1 == 1.0f) && (intercept1 == 0.0f))
		return; //identity transform: nothing to do
	__m128d s = _mm_set1_pd(slope1);
	__m128d b = _mm_set1_pd(intercept1);
	flt *p = v;
	int64_t i = 0;
	for (; i <= (n - kSSE64); i += kSSE64) {
		__m128d x = _mm_loadu_pd(p);
		_mm_storeu_pd(p, _mm_add_pd(_mm_mul_pd(x, s), b));
		p += kSSE64;
	}
	//scalar tail: remaining n % kSSE64 elements
	for (; i < n; i++)
		v[i] = (v[i] * slope1) + intercept1;
} //nifti_fma()
#endif //end SIMD64
#else //if SIMD vectorized, else scalar
static void nifti_sqrt(flt *v, size_t n) {
	//Scalar fallback: in-place square root of each of the n elements.
	for (flt *p = v; p != v + n; ++p)
		*p = sqrt(*p);
} //nifti_sqrt()
static void nifti_mul(flt *v, size_t n, flt slope1) {
	//Scalar fallback: in-place scale of each of the n elements by slope1.
	for (flt *p = v; p != v + n; ++p)
		*p *= slope1;
} //nifti_mul()
static void nifti_add(flt *v, size_t n, flt intercept1) {
	//Scalar fallback: add intercept1 to each of the n elements in place.
	for (flt *p = v; p != v + n; ++p)
		*p += intercept1;
} //nifti_add()
static void nifti_fma(flt *v, size_t n, flt slope1, flt intercept1) {
	//Scalar fallback: in-place v[i] = v[i] * slope1 + intercept1.
	for (flt *p = v; p != v + n; ++p)
		*p = (*p * slope1) + intercept1;
} //nifti_fma
#endif //if vector SIMD else scalar
static int show_helpx(void) {
	//Stub help: the wrapper program is expected to print real usage text,
	//so reaching this function is a fatal configuration error.
	fputs("Fatal: show_help shown by wrapper function\n", stdout);
	exit(1);
}
static flt vx(flt *f, int p, int q) {
	//X coordinate where the parabolas rooted at p and q intersect (EDT helper);
	//a 0/0 result (both parabolas at infinity) is mapped to +INFINITY.
	flt numer = (f[q] + q * q) - (f[p] + p * p);
	flt denom = 2.0 * q - 2.0 * p;
	flt x = numer / denom;
	return isnan(x) ? INFINITY : x;
}
// Transpose the X and Y dimensions of a 3-D volume, slice by slice.
// img3Din is read sequentially; img3Dout receives the transposed data.
// On return *nxp and *nyp are swapped so callers see the new layout.
inline void transposeXY( flt *img3Din, flt *img3Dout, int *nxp, int *nyp, int nz) {
	//transpose X and Y dimensions: rows <-> columns
	//Note: in future we could use SIMD to transpose values in tiles
	// https://stackoverflow.com/questions/16737298/what-is-the-fastest-way-to-transpose-a-matrix-in-c
	int nx = *nxp;
	int ny = *nyp;
	size_t vi = 0; //volume offset: sequential read index into img3Din
	for (int z = 0; z < nz; z++) {
		int zo = z * nx * ny; //start of slice z in the output
		for (int y = 0; y < ny; y++) {
			int xo = 0; //output column offset, advances by ny per input x
			for (int x = 0; x < nx; x++) {
				img3Dout[zo + xo + y] = img3Din[vi];
				xo += ny;
				vi += 1;
			}
		}
	}
	//report the swapped dimensions back to the caller
	*nxp = ny;
	*nyp = nx;
}
// Transpose the X and Z dimensions of a 3-D volume (columns <-> slices).
// img3Din is read sequentially; img3Dout receives the transposed data.
// On return *nxp and *nzp are swapped so callers see the new layout.
inline void transposeXZ( flt *img3Din, flt *img3Dout, int *nxp, int ny, int *nzp) {
	//transpose X and Z dimensions: slices <-> columns
	int nx = *nxp;
	int nz = *nzp;
	int nyz = ny * nz; //output slice size (one input x maps to one output slice)
	size_t vi = 0; //volume offset: sequential read index into img3Din
	for (int z = 0; z < nz; z++) {
		for (int y = 0; y < ny; y++) {
			int yo = y * nz; //output row offset
			int zo = 0; //output slice offset, advances by nyz per input x
			for (int x = 0; x < nx; x++) {
				img3Dout[z + yo + zo] = img3Din[vi];
				zo += nyz;
				vi += 1;
			}
		}
	}
	//report the swapped dimensions back to the caller
	*nxp = nz;
	*nzp = nx;
}
// 1-D squared Euclidean distance transform of f (length n), in place.
// Lower-envelope-of-parabolas method (Felzenszwalb & Huttenlocher);
// scratch buffers are aligned-allocated per call and freed on exit.
static void edt(flt *f, int n) {
	int q, p, k;
	flt s, dx;
	flt *d = (flt *)_mm_malloc((n+2) * sizeof(flt), 64);
	flt *z = (flt *)_mm_malloc((n+2) * sizeof(flt), 64);
	int *v = (int *)_mm_malloc((n+2) * sizeof(int), 64);
	/*# Find the lower envelope of a sequence of parabolas.
	# f...source data (returns the Y of the parabola vertex at X)
	# d...destination data (final distance values are written here)
	# z...temporary used to store X coords of parabola intersections
	# v...temporary used to store X coords of parabola vertices
	# i...resulting X coords of parabola vertices
	# n...number of pixels in "f" to process
	# Always add the first pixel to the enveloping set since it is
	# obviously lower than all parabolas processed so far.*/
	k = 0;
	v[0] = 0;
	z[0] = -INFINITY;
	z[1] = INFINITY;
	for (q = 1; q < n; q++) {
		/* If the new parabola is lower than the right-most parabola in
		# the envelope, remove it from the envelope. To make this
		# determination, find the X coordinate of the intersection (s)
		# between the parabolas with vertices at (q,f[q]) and (p,f[p]).*/
		p = v[k];
		s = vx(f, p, q);
		//while (s <= z[k]) {
		// the k > 0 guard keeps the sentinel parabola at index 0 in the envelope
		while ((s <= z[k]) && (k > 0)) {
			k = k - 1;
			p = v[k];
			s = vx(f, p, q);
		}
		//# Add the new parabola to the envelope.
		k = k + 1;
		v[k] = q;
		z[k] = s;
		z[k + 1] = INFINITY;
	}
	/*# Go back through the parabolas in the envelope and evaluate them
	# in order to populate the distance values at each X coordinate.*/
	k = 0;
	for (q = 0; q < n; q++) {
		while (z[k + 1] < q)
			k = k + 1;
		dx = (q - v[k]);
		d[q] = dx * dx + f[v[k]];
	}
	//copy result back into the caller's buffer
	for (q = 0; q < n; q++)
		f[q] = d[q];
	_mm_free(d);
	_mm_free(z);
	_mm_free(v);
}
static void edt1(flt *df, int n) { //first dimension is simple
	//In-place 1D squared distance transform for a binary row: df[i] == 0 marks
	//a feature voxel; every other entry receives the squared distance to the
	//nearest feature, found by a forward sweep then a backward sweep.
	int lastZero = 0; //index of the most recent feature voxel
	flt lastVal = INFINITY; //df value at that feature (0 once one is seen)
	//left-to-right sweep
	for (int i = 0; i < n; i++) {
		if (df[i] == 0) {
			lastZero = i;
			lastVal = 0;
		} else {
			df[i] = sqr(i - lastZero) + lastVal;
		}
	}
	//right-to-left sweep: keep the smaller of the two candidate distances
	lastZero = n;
	lastVal = INFINITY;
	for (int i = n - 1; i >= 0; i--) {
		flt cand = sqr(i - lastZero) + lastVal;
		if (df[i] < cand) {
			lastZero = i;
			lastVal = df[i];
		} else {
			df[i] = cand;
		}
	}
}
static int nifti_edt(nifti_image *nim) {
	//3D Euclidean distance transform: voxels > 0 become features, every other
	//voxel receives its squared distance to the nearest feature, computed
	//separably along X, then Y, then Z.
	//https://github.com/neurolabusc/DistanceFields
	//Returns 0 on success, 1 on unsupported input.
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *img = (flt *)nim->data;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox)
		return 1;
	int nx = nim->nx;
	int ny = nim->ny;
	int nz = nim->nz;
	//binarize: zero stays zero, anything above threshold becomes "infinitely far"
	flt threshold = 0.0;
	for (size_t i = 0; i < nim->nvox; i++) {
		if (img[i] > threshold)
			img[i] = INFINITY;
		else
			img[i] = 0;
	}
	size_t nRow = 1;
	for (int i = 2; i < 8; i++)
		nRow *= MAX(nim->dim[i], 1);
	//EDT in left-right direction
	//BUG FIX: the original called edt1(imgRow += nx, nx), which incremented the
	//pointer BEFORE the first use — skipping row 0 and reading one full row past
	//the end of the image on the final iteration. Same fix applied to both
	//transposed passes below.
	for (size_t r = 0; r < nRow; r++)
		edt1(img + (r * (size_t)nx), nx);
	//EDT in anterior-posterior direction
	nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows
	for (int v = 0; v < nVol; v++) { //transpose each volume separately
		flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
		size_t vo = (size_t)v * nvox3D; //volume offset
		int tnx = nx; //local copies: transposeXY swaps the dims it is passed
		int tny = ny;
		transposeXY(&img[vo], img3D, &tnx, &tny, nz);
		//perform EDT for all "rows"; after the transpose each row has length tnx (== original ny)
		for (size_t r = 0; r < nRow; r++)
			edt(img3D + (r * (size_t)tnx), tnx);
		transposeXY(img3D, &img[vo], &tnx, &tny, nz); //swap back
		_mm_free(img3D);
	} //for each volume
	//EDT in head-foot direction
	nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++) { //transpose each volume separately
		flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
		size_t vo = (size_t)v * nvox3D; //volume offset
		//BUG FIX: thread-local dims — transposeXZ writes through these pointers,
		//so sharing nx/nz across OpenMP threads was a data race.
		int tnx = nx;
		int tnz = nz;
		transposeXZ(&img[vo], img3D, &tnx, ny, &tnz);
		//perform EDT for all "rows"; after the transpose each row has length tnx (== original nz)
		for (size_t r = 0; r < nRow; r++)
			edt(img3D + (r * (size_t)tnx), tnx);
		transposeXZ(img3D, &img[vo], &tnx, ny, &tnz); //swap back
		_mm_free(img3D);
	} //for each volume
	return 0;
}
//kernelWid influences width of kernel, use negative values for round, positive for ceil
// kernelWid of 2.5 means the kernel will be (2 * ceil(2.5 * sigma))+1 voxels wide
// kernelWid of -6.0 means the kernel will be (2 * round(6.0 * sigma))+1 voxels wide
// 2.5 AFNI ceil(2.5) https://github.com/afni/afni/blob/25e77d564f2c67ff480fa99a7b8e48ec2d9a89fc/src/edt_blur.c#L1391
// -6 SPM round(6) https://github.com/spm/spm12/blob/3085dac00ac804adb190a7e82c6ef11866c8af02/spm_smooth.m#L97
// -6 FSL round(6) (estimated)
// -3 opencv round(3) or round(4) depending on datatype https://github.com/opencv/opencv/blob/9c23f2f1a682faa9f0b2c2223a857c7d93ba65a6/modules/imgproc/src/smooth.cpp#L3782
//bioimagesuite floor(1.5) https://github.com/bioimagesuiteweb/bisweb/blob/210d678c92fd404287fe5766136379ec94750eb2/js/utilities/bis_imagesmoothreslice.js#L133
//Gaussian blur, both serial and parallel variants, https://github.com/neurolabusc/niiSmooth
static void blurS(flt *img, int nx, int ny, flt xmm, flt Sigmamm, flt kernelWid) {
	//serial blur
	//Gaussian-blur each of ny consecutive rows of length nx, in place.
	// img: first row; xmm: voxel size along the row (mm); Sigmamm: sigma in mm;
	// kernelWid: negative -> half-width = round(|kernelWid|*sigma),
	//            positive -> half-width = ceil(kernelWid*sigma).
	//make kernels
	if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0))
		return;
	//flt sigma = (FWHMmm/xmm)/sqrt(8*log(2));
	flt sigma = (Sigmamm / xmm); //mm to vox
	//round(6*sigma), ceil(4*sigma) seems spot on larger than fslmaths
	//int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	int cutoffvox;
	if (kernelWid < 0)
		cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	else
		cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	//printf(".Blur Cutoff (%g) %d\n", 4*sigma, cutoffvox);
	//validated on SPM12's 1.5mm isotropic mask_ICV.nii (discrete jump in number of non-zero voxels)
	//fslmaths mask -s 2.26 f6.nii //Blur Cutoff (6.02667) 7
	//fslmaths mask -s 2.24 f4.nii //Blur Cutoff (5.97333) 6
	cutoffvox = MAX(cutoffvox, 1);
	flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian
	flt expd = 2 * sigma * sigma;
	for (int i = 0; i <= cutoffvox; i++)
		k[i] = exp(-1.0f * (i * i) / expd); //half-kernel: k[i] is the weight at distance i voxels
	//calculate start, end for each voxel in
	int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... cutoffvox
	int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns
	flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0
	for (int i = 0; i < nx; i++) {
		kStart[i] = MAX(-cutoffvox, -i); //do not read below 0
		kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final columnn
		if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight
			kWeight[i] = kWeight[i - 1];
			continue;
		}
		flt wt = 0.0f;
		for (int j = kStart[i]; j <= kEnd[i]; j++)
			wt += k[abs(j)];
		kWeight[i] = 1 / wt; //normalize so truncated edge kernels still sum to 1
		//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
	}
	//apply kernel to each row
	flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur
	for (int y = 0; y < ny; y++) {
		//printf("-+ %d:%d\n", y, ny);
		memcpy(tmp, img, nx * sizeof(flt)); //snapshot the row so all reads see unblurred values
		for (int x = 0; x < nx; x++) {
			flt sum = 0;
			for (int i = kStart[x]; i <= kEnd[x]; i++)
				sum += tmp[x + i] * k[abs(i)];
			img[x] = sum * kWeight[x];
		}
		img += nx; //advance to the next row
	} //blurX
	//free kernel
	_mm_free(tmp);
	_mm_free(k);
	_mm_free(kStart);
	_mm_free(kEnd);
	_mm_free(kWeight);
}
#if defined(_OPENMP)
static void blurP(flt *img, int nx, int ny, flt xmm, flt FWHMmm, flt kernelWid) {
	//parallel blur
	//OpenMP variant of blurS: kernel setup is identical; each of the ny rows
	//is then blurred by its own thread with a private scratch buffer.
	//NOTE(review): the parameter is named FWHMmm but is used directly as a
	// sigma (the FWHM conversion below is commented out), matching blurS's
	// Sigmamm — confirm the name is just historical.
	//make kernels
	if ((xmm == 0) || (nx < 2) || (ny < 1) || (FWHMmm <= 0.0))
		return;
	//flt sigma = (FWHMmm/xmm)/sqrt(8*log(2));
	flt sigma = (FWHMmm / xmm); //mm to vox
	int cutoffvox;
	if (kernelWid < 0)
		cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	else
		cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	cutoffvox = MAX(cutoffvox, 1);
	flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian
	flt expd = 2 * sigma * sigma;
	for (int i = 0; i <= cutoffvox; i++)
		k[i] = exp(-1.0f * (i * i) / expd); //half-kernel: weight at distance i voxels
	//calculate start, end for each voxel in
	int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... cutoffvox
	int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns
	flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0
	for (int i = 0; i < nx; i++) {
		kStart[i] = MAX(-cutoffvox, -i); //do not read below 0
		kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final columnn
		if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight
			kWeight[i] = kWeight[i - 1];
			continue;
		}
		flt wt = 0.0f;
		for (int j = kStart[i]; j <= kEnd[i]; j++)
			wt += k[abs(j)];
		kWeight[i] = 1 / wt; //normalize so truncated edge kernels still sum to 1
		//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
	}
	//apply kernel to each row
	#pragma omp parallel for
	for (int y = 0; y < ny; y++) {
		flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur (per-thread scratch)
		flt *imgx = img;
		imgx += (nx * y); //this thread's row
		memcpy(tmp, imgx, nx * sizeof(flt));
		for (int x = 0; x < nx; x++) {
			flt sum = 0;
			for (int i = kStart[x]; i <= kEnd[x]; i++)
				sum += tmp[x + i] * k[abs(i)];
			imgx[x] = sum * kWeight[x];
		}
		_mm_free(tmp);
	}
	//free kernel
	_mm_free(k);
	_mm_free(kStart);
	_mm_free(kEnd);
	_mm_free(kWeight);
} //blurP
#endif
static int nifti_smooth_gauss(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt kernelWid) {
	//Separable 3D Gaussian smoothing: blur along X, then Y, then Z.
	//Sigma arguments are in mm; a NEGATIVE sigma means "in voxels" and is
	//converted with the image's voxel size. kernelWid selects the kernel
	//half-width rule (see blurS). Returns 0 on success, 1 on error.
	//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if ((SigmammX == 0) && (SigmammY == 0) && (SigmammZ == 0))
		return 0; //all done: no smoothing, e.g. small kernel for difference of Gaussian
	if (SigmammX < 0) //negative values for voxels, not mm
		SigmammX = -SigmammX * nim->dx;
	if (SigmammY < 0) //negative values for voxels, not mm
		SigmammY = -SigmammY * nim->dy;
	if (SigmammZ < 0) //negative values for voxels, not mm
		SigmammZ = -SigmammZ * nim->dz;
	flt *img = (flt *)nim->data;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox)
		return 1;
	int nx = nim->nx;
	int ny = nim->ny;
	int nz = nim->nz;
	int nRow;
	if (SigmammX > 0.0) {
		//BLUR X: rows are already contiguous, no transpose needed
		nRow = 1;
		for (int i = 2; i < 8; i++)
			nRow *= MAX(nim->dim[i], 1);
#if defined(_OPENMP)
		if (omp_get_max_threads() > 1)
			blurP(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
		else
			blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
#else
		blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
#endif
	}
	if (SigmammY > 0.0) {
		//BLUR Y: transpose XYZ to YXZ and blur Y columns with XZ rows
		nRow = nim->nx * nim->nz;
		#pragma omp parallel for
		for (int v = 0; v < nVol; v++) { //transpose each volume separately
			flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
			size_t vo = (size_t)v * nvox3D; //volume offset
			//BUG FIX: transposeXY swaps the dims it is passed; the original
			//shared nx/ny across all OpenMP threads — a data race that could
			//corrupt the dimensions mid-run. Thread-local copies are race-free
			//and net out to the same values (swap, then swap back).
			int tnx = nx;
			int tny = ny;
			transposeXY(&img[vo], img3D, &tnx, &tny, nz);
			blurS(img3D, nim->ny, nRow, nim->dy, SigmammY, kernelWid);
			transposeXY(img3D, &img[vo], &tnx, &tny, nz); //swap back
			_mm_free(img3D);
		} //for each volume
	}
	if ((SigmammZ > 0.0) && (nim->nz >= 2)) {
		//BLUR Z: transpose XYZ to ZXY and blur Z columns with XY rows
		nRow = nim->nx * nim->ny;
		#pragma omp parallel for
		for (int v = 0; v < nVol; v++) { //transpose each volume separately
			flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
			size_t vo = (size_t)v * nvox3D; //volume offset
			int tnx = nx; //thread-local dims (see Y-blur note above)
			int tnz = nz;
			transposeXZ(&img[vo], img3D, &tnx, ny, &tnz);
			blurS(img3D, nim->nz, nRow, nim->dz, SigmammZ, kernelWid);
			transposeXZ(img3D, &img[vo], &tnx, ny, &tnz); //swap back
			_mm_free(img3D);
		} //for each volume
	}
	return 0;
} // nifti_smooth_gauss()
static int nifti_smooth_gauss_vox(nifti_image *nim, flt SigmaVox) {
	//Blur with a sigma specified in voxels: convert to mm per axis and use the
	//round(6*sigma) kernel half-width (kernelWid = -6.0, FSL/SPM convention).
	return nifti_smooth_gauss(nim, SigmaVox * nim->dx, SigmaVox * nim->dy, SigmaVox * nim->dz, -6.0);
} // nifti_smooth_gauss_vox()
static int nifti_dog(nifti_image *nim, flt SigmammPos, flt SigmammNeg, int isEdge) {
	//Difference of Gaussians (DoG): difference ratio of 1.6 approximates a Laplacian of Gaussian
	// https://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
	//isEdge == 0: keep the continuous DoG values;
	//isEdge == 1: binary one-sided zero-crossing map (face neighbors only);
	//any other isEdge: graded map (1.0 face, 0.5 edge, 0.25 corner crossings).
	flt kKernelWid = 2.5; //ceil(2.5)
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1) || (nim->datatype != DT_CALC))
		return 1;
	if (SigmammPos == SigmammNeg) {
		fprintf(stderr, "Difference of Gaussian requires two different sigma values.\n");
		return 1;
	}
	if ((SigmammPos < 0) || (SigmammNeg < 0)) {
		fprintf(stderr, "Difference of Gaussian requires positive values of sigma.\n");
		return 1;
	}
	flt sigmaMn = MIN(SigmammNeg, SigmammPos);
	flt sigmaMx = MAX(SigmammNeg, SigmammPos);
	//Optimization: use results from narrow blur (sigmaMn) as inputs for wide blur (sigmaMx)
	//consider desired blurs of 2mm and 3.2mm, we can instead compute 2mm and 2.5mmm
	//only about 10% faster for difference ratio of 2.0, but also removes one copy
	//https://computergraphics.stackexchange.com/questions/256/is-doing-multiple-gaussian-blurs-the-same-as-doing-one-larger-blur
	sigmaMx = sqrt((sigmaMx*sigmaMx) - (sigmaMn*sigmaMn));
	flt *inimg = (flt *)nim->data;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	int64_t nvox4D = nvox3D * nVol;
	int ret = nifti_smooth_gauss(nim, sigmaMn, sigmaMn, sigmaMn, kKernelWid);
	if (ret != 0)
		return ret;
	flt *imgMn = (flt *)_mm_malloc(nvox4D * sizeof(flt), 64); //alloc for each volume to allow openmp
	//imgMn keeps the narrow-sigma blur before the image is blurred further
	for (int64_t i = 0; i < nvox4D; i++)
		imgMn[i] = inimg[i];
	ret = nifti_smooth_gauss(nim, sigmaMx, sigmaMx, sigmaMx, kKernelWid);
	//branch on which input sigma was larger so the result is always
	//blur(SigmammPos) - blur(SigmammNeg)
	if (SigmammPos > SigmammNeg) {
		for (int64_t i = 0; i < nvox4D; i++)
			inimg[i] = inimg[i] - imgMn[i];
	} else {
		for (int64_t i = 0; i < nvox4D; i++)
			inimg[i] = imgMn[i] - inimg[i];
	}
	if (!isEdge) {
		_mm_free(imgMn);
		return ret; //return continuous values
	}
	//we will define edges as voxels with zero crossings
	for (int64_t i = 0; i < nvox4D; i++)
		imgMn[i] = 0.0; //reuse imgMn as the edge-map output
	int nx = nim->nx;
	int nxy = nx * nim->ny;
	int nxyz = nxy * nim->nz;
	//interior voxels only (1..dim-2), so neighbor indexing cannot leave the volume
	for (int v = 0; v < nVol; v++)
		for (int z = 1; z < (nim->nz -1); z++)
			for (int y = 1; y < (nim->ny - 1); y++)
				for (size_t x = 1; x < (nim->nx - 1); x++) {
					int64_t i = x + (y * nx) + (z * nxy) + (v * nxyz);
					flt val = inimg[i];
					if ((isEdge == 1) && (val <= 0.0)) continue; //one sided edge
					if (val == 0.0) continue; //masked images will have a lot of zeros!
					//logic: pos*neg = neg; pos*pos=pos; neg*neg=neg
					//check six neighbors that share a face
					if (val * inimg[i-1] < 0) { imgMn[i] = 1.0; continue;}
					if (val * inimg[i+1] < 0) { imgMn[i] = 1.0; continue;}
					if (val * inimg[i-nx] < 0) { imgMn[i] = 1.0; continue;}
					if (val * inimg[i+nx] < 0) { imgMn[i] = 1.0; continue;}
					if (val * inimg[i-nxy] < 0) { imgMn[i] = 1.0; continue;}
					if (val * inimg[i+nxy] < 0) { imgMn[i] = 1.0; continue;}
					if (isEdge == 1) continue; //dog1 is binary
					//check 12 neighbors that share an edge
					if (val * inimg[i-1-nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+1-nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i-nx-nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+nx-nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i-1-nx] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+1-nx] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i-1+nx] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+1+nx] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i-1+nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+1+nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i-nx+nxy] < 0) { imgMn[i] = 0.5; continue;}
					if (val * inimg[i+nx+nxy] < 0) { imgMn[i] = 0.5; continue;}
					//check 8 neighbors that share a corner
					if (val * inimg[i-1-nx-nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i+1-nx-nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i-1+nx-nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i+1+nx-nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i-1-nx+nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i+1-nx+nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i-1+nx+nxy] < 0) { imgMn[i] = 0.25; continue;}
					if (val * inimg[i+1+nx+nxy] < 0) { imgMn[i] = 0.25; continue;}
				}
	for (int64_t i = 0; i < nvox4D; i++)
		inimg[i] = imgMn[i];
	//edge output is in 0..1: reset scaling and display range to match
	nim->scl_inter = 0.0;
	nim->scl_slope = 1.0;
	nim->cal_min = 0.0;
	nim->cal_max = 1.0;
	_mm_free(imgMn);
	return ret;
} // nifti_dog()
static int nifti_otsu(nifti_image *nim, int ignoreZeroVoxels) { //binarize image using Otsu's method
	//Choose the threshold that maximizes between-class variance over a
	//1001-bin histogram, then binarize in place (0.0 / 1.0).
	//ignoreZeroVoxels: exclude exact zeros from both histogram and output.
	//https://en.wikipedia.org/wiki/Otsu%27s_method
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *inimg = (flt *)nim->data;
	flt mn = INFINITY; //better that inimg[0] in case NaN
	flt mx = -INFINITY;
	for (size_t i = 0; i < nim->nvox; i++) {
		mn = MIN(mn, inimg[i]);
		mx = MAX(mx, inimg[i]);
	}
	if (mn >= mx)
		return 0; //no variability
#define nBins 1001
	flt scl = (nBins - 1) / (mx - mn);
	int hist[nBins];
	for (int i = 0; i < nBins; i++)
		hist[i] = 0;
	if (ignoreZeroVoxels) {
		for (size_t i = 0; i < nim->nvox; i++) {
			if (isnan(inimg[i]))
				continue;
			if (inimg[i] == 0.0)
				continue;
			hist[(int)round((inimg[i] - mn) * scl)]++;
		}
	} else {
		for (size_t i = 0; i < nim->nvox; i++) {
			if (isnan(inimg[i]))
				continue;
			hist[(int)round((inimg[i] - mn) * scl)]++;
		}
	}
	//https://en.wikipedia.org/wiki/Otsu%27s_method
	size_t total = 0;
	for (int i = 0; i < nBins; i++)
		total += hist[i];
	int level = 0;
	double sumB = 0; //weighted sum of bins below the candidate threshold
	double wB = 0; //count of voxels below the candidate threshold
	double maximum = 0.0;
	double sum1 = 0.0; //weighted sum over ALL bins (bin index * count)
	for (int i = 0; i < nBins; i++)
		sum1 += (double)i * hist[i];
	for (int ii = 0; ii < nBins; ii++) {
		double wF = total - wB;
		if ((wB > 0) && (wF > 0)) {
			double mF = (sum1 - sumB) / wF; //mean of the foreground class
			double val = wB * wF * ((sumB / wB) - mF) * ((sumB / wB) - mF); //between-class variance
			if (val >= maximum) {
				level = ii;
				maximum = val;
			}
		}
		wB = wB + hist[ii];
		//BUG FIX: was (ii - 1) * hist[ii] — a leftover from a 1-based MATLAB
		//port. sum1 weights bins by ii, so sumB must use the same weight or
		//the class means (and hence the threshold) are skewed by one bin.
		sumB = sumB + (double)ii * hist[ii];
	}
	double threshold = (level / scl) + mn;
	if (ignoreZeroVoxels) {
		for (size_t i = 0; i < nim->nvox; i++) {
			if (inimg[i] == 0.0)
				continue;
			inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0;
		}
	} else {
		for (size_t i = 0; i < nim->nvox; i++)
			inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0;
	}
	//fprintf(stderr,"range %g..%g threshold %g bin %d\n", mn, mx, threshold, level);
	return 0;
} // nifti_otsu()
static int nifti_unsharp(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt amount) {
	//Unsharp mask: sharpened = original + (original - blurred) * amount,
	//clamped to the input's intensity range. Sigmas as in nifti_smooth_gauss.
	//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if (amount == 0.0)
		return 0; //amount 0: no sharpening requested
	flt *inimg = (flt *)nim->data;
	void *indat = (void *)nim->data; //remember the real buffer: nim->data is temporarily re-pointed below
	flt mn = INFINITY; //better that inimg[0] in case NaN
	flt mx = -INFINITY;
	for (int i = 0; i < nim->nvox; i++) {
		mn = MIN(mn, inimg[i]);
		mx = MAX(mx, inimg[i]);
	}
	if (mn >= mx)
		return 0; //no variability
	size_t nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	size_t nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox)
		return 1;
	//process each 3D volume independently: reduce memory pressure
	//nim->nvox and nim->data are temporarily redirected so nifti_smooth_gauss
	//blurs a scratch copy of one volume at a time; both are restored below.
	nim->nvox = nvox3D;
	void *sdat = (void *)calloc(1, nim->nvox * sizeof(flt));
	nim->data = sdat;
	flt *simg = (flt *)sdat;
	for (int v = 0; v < nVol; v++) {
		memcpy(simg, inimg, nim->nvox * sizeof(flt)); //copy this volume into the scratch buffer
		nifti_smooth_gauss(nim, SigmammX, SigmammY, SigmammZ, 2.5); //2.5: a relatively narrow kernel for speed
		for (int i = 0; i < nim->nvox; i++) {
			//sharpened = original + (original - blurred) * amount
			inimg[i] += (inimg[i] - simg[i]) * amount;
			//keep in original range
			inimg[i] = MAX(inimg[i], mn);
			inimg[i] = MIN(inimg[i], mx);
		}
		inimg += nim->nvox; //advance to the next volume
	}
	free(sdat);
	//return original data
	nim->data = indat;
	nim->nvox = nvox3D * nVol;
	return 0;
} //nifti_unsharp()
static int nifti_crop(nifti_image *nim, int tmin, int tsize) {
	//Extract volumes [tmin .. tmin+tsize-1] from a 4D image (fslroi-style).
	// tsize < 0 means "through the final volume"; tmin < 0 is clamped to 0.
	//Updates nim->data, nvox and the dim[] header fields. Returns 0 on success.
	if (tsize == 0) {
		fprintf(stderr, "tsize must not be 0\n");
		return 1;
	}
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0))
		return 1;
	int nvol = (nim->nvox / nvox3D); //in
	if (nvol < 2) {
		fprintf(stderr, "crop only appropriate for 4D volumes\n"); //BUG FIX: newline was missing
		return 1;
	}
	if (tmin >= nvol) {
		fprintf(stderr, "tmin must be from 0..%d, not %d\n", nvol - 1, tmin);
		return 1;
	}
	int tminVol = MAX(0, tmin);
	int tFinalVol = tminVol + tsize - 1; //e.g. if tmin=0 and tsize=1, tFinal=0
	if (tsize < 0) {
		tFinalVol = INT_MAX; //negative tsize: keep through the last volume
	}
	tFinalVol = MIN(tFinalVol, nvol - 1);
	if ((tminVol == 0) && (tFinalVol == (nvol - 1)))
		return 0; //selection covers everything: nothing to crop
	int nvolOut = tFinalVol - tminVol + 1;
	flt *imgIn = (flt *)nim->data;
	size_t nvoxOut = (size_t)nvox3D * nvolOut;
	void *dat = (void *)calloc(1, nvoxOut * sizeof(flt));
	//BUG FIX: allocation was not checked (and nim->nvox was updated before the
	//allocation succeeded); on failure leave nim untouched.
	if (dat == NULL)
		return 1;
	memcpy(dat, imgIn + ((size_t)tminVol * nvox3D), nvoxOut * sizeof(flt));
	free(nim->data);
	nim->data = dat;
	nim->nvox = nvoxOut;
	if (nvolOut == 1)
		nim->dim[0] = 3;
	else
		nim->dim[0] = 4;
	nim->ndim = nim->dim[0];
	nim->dim[4] = nvolOut;
	nim->nt = nvolOut;
	nim->nu = 1;
	nim->nv = 1;
	nim->nw = 1;
	for (int i = 5; i < 8; i++)
		nim->dim[i] = 1;
	return 0;
}
static int nifti_rescale(nifti_image *nim, double scale, double intercept) {
	//linear transform of data: every voxel becomes v * scale + intercept.
	//Returns 0 on success, 1 on unsupported datatype/empty image.
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype == DT_CALC) {
		flt scl = scale;
		flt inter = intercept;
		flt *f32 = (flt *)nim->data;
		if (intercept == 0.0) {
			if (scale == 1.0)
				return 0; //identity transform: nothing to do
			nifti_mul(f32, nim->nvox, scl); //pure scaling
			return 0;
		}
		//BUG FIX: the old scale==1.0 special case called nifti_mul with the
		//intercept, MULTIPLYING by it instead of adding it. nifti_fma applies
		//v*scl + inter, which is correct for every remaining case including
		//scl == 1 (pure offset).
		nifti_fma(f32, nim->nvox, scl, inter);
		//for (size_t i = 0; i < nim->nvox; i++ )
		//	f32[i] = (f32[i] * scl) + inter;
		return 0;
	}
	fprintf(stderr, "nifti_rescale: Unsupported datatype %d\n", nim->datatype);
	return 1;
}
static int nifti_tfceS(nifti_image *nim, double H, double E, int c, int x, int y, int z, double tfce_thresh) {
	//Threshold-Free Cluster Enhancement restricted to the cluster containing
	//the seed voxel (x,y,z). H: height exponent, E: extent exponent,
	//c: connectivity (6, 18 or 26). Voxels whose TFCE score stays zero are
	//zeroed; integration stops once the seed reaches tfce_thresh.
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if ((x < 0) || (x >= nim->dim[1]) || (y < 0) || (y >= nim->dim[2]) || (z < 0) || (z >= nim->dim[3])) {
		fprintf(stderr, "tfceS x/y/z must be in range 0..%" PRId64 "/0..%" PRId64 "/0..%" PRId64 "\n", nim->dim[1] - 1, nim->dim[2] - 1, nim->dim[3] - 1);
		return 1; //BUG FIX: previously only warned, then indexed out of bounds with the bad coordinate
	}
	if (!neg_determ(nim))
		x = nim->dim[1] - x - 1; //flip left-right so the coordinate matches voxel storage order
	int seed = x + (y * nim->dim[1]) + (z * nim->dim[1] * nim->dim[2]);
	flt *inimg = (flt *)nim->data;
	if (inimg[seed] < H) {
		fprintf(stderr, "it doesn't reach to specified threshold\n");
		return 1;
	}
	size_t nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	if (nim->nvox > nvox3D) {
		fprintf(stderr, "tfceS not suitable for 4D data.\n");
		return 1;
	}
	//printf("peak %g\n", inimg[seed]);
	int numk = c;
	if ((c != 6) && (c != 18) && (c != 26)) {
		fprintf(stderr, "suitable values for c are 6, 18 or 26\n");
		numk = 6;
	}
	//set up kernel to search for neighbors: linear offset plus the x and y
	//deltas needed to reject neighbors that wrap across row/column boundaries
	int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x delta, y delta
	int mxDx = 1; //connectivity 6: faces only
	if (numk == 18)
		mxDx = 2; //connectivity 18: faces+edges
	if (numk == 26)
		mxDx = 3; //connectivity 26: faces+edges+corners
	int j = 0;
	for (int kz = -1; kz <= 1; kz++) //renamed from x/y/z: they shadowed the seed parameters
		for (int ky = -1; ky <= 1; ky++)
			for (int kx = -1; kx <= 1; kx++) {
				int dist = abs(kx) + abs(ky) + abs(kz);
				if ((dist > mxDx) || (dist == 0))
					continue;
				k[j] = kx + (ky * nim->nx) + (kz * nim->nx * nim->ny);
				k[j + numk] = kx; //avoid left-right wrap
				k[j + numk + numk] = ky; //BUG FIX: was kx — the y delta is needed to detect anterior-posterior wrap
				j++;
			} //for kx
	flt mx = (inimg[0]);
	for (size_t i = 0; i < nvox3D; i++)
		mx = MAX((inimg[i]), mx);
	double dh = mx / 100.0; //integrate TFCE over 100 height steps
	flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image
	int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed
	uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64); //1 = survives threshold, not yet clustered
	for (size_t i = 0; i < nvox3D; i++)
		outimg[i] = 0.0;
	int n_steps = (int)ceil(mx / dh);
	//descend from the highest threshold so we can stop as soon as the seed's
	//accumulated score reaches tfce_thresh
	for (int step = n_steps - 1; step >= 0; step--) {
		flt thresh = (step + 1) * dh;
		memset(vxs, 0, nvox3D * sizeof(uint8_t));
		for (size_t i = 0; i < nvox3D; i++)
			if (inimg[i] >= thresh)
				vxs[i] = 1; //survives, unclustered
		int qlo = 0;
		int qhi = 0;
		q[qhi] = seed; //add starting voxel as seed in queue
		vxs[seed] = 0; //do not find again!
		while (qhi >= qlo) { //first in, first out queue
			//retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity)
			for (int n = 0; n < numk; n++) {
				int jj = q[qlo] + k[n];
				if ((jj < 0) || (jj >= nvox3D))
					continue; //voxel in volume
				if (vxs[jj] == 0)
					continue; //already found or did not survive threshold
				int dx = x + k[n + numk];
				if ((dx < 0) || (dx >= nim->nx))
					continue; //wrapped left-right
				int dy = y + k[n + numk + numk];
				if ((dy < 0) || (dy >= nim->ny))
					continue; //wrapped anterior-posterior
				//NOTE(review): wrap test uses the seed's x/y for every queued voxel — approximate; confirm intent
				//add new seed:
				vxs[jj] = 0; //do not find again!
				qhi++;
				q[qhi] = jj;
			}
			qlo++;
		} //while qhi >= qlo: continue until all seeds tested
		flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1
		for (int n = 0; n <= qhi; n++)
			outimg[q[n]] += valToAdd;
		//printf("step %d thresh %g\n", step, outimg[seed]);
		if (outimg[seed] >= tfce_thresh)
			break;
	} //for each step
	if (outimg[seed] < tfce_thresh)
		fprintf(stderr, "it doesn't reach to specified threshold (%g < %g)\n", outimg[seed], tfce_thresh);
	for (size_t i = 0; i < nvox3D; i++)
		if (outimg[i] == 0.0)
			inimg[i] = 0.0;
	_mm_free(q);
	_mm_free(vxs);
	_mm_free(outimg);
	_mm_free(k);
	return 0;
}
static int nifti_tfce(nifti_image *nim, double H, double E, int c) {
	//Threshold-Free Cluster Enhancement (Smith & Nichols).
	//https://www.fmrib.ox.ac.uk/datasets/techrep/tr08ss1/tr08ss1.pdf
	//H: height exponent, E: extent exponent, c: connectivity (6, 18 or 26).
	//Each 3D volume is enhanced independently (in parallel under OpenMP).
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nvol = nim->nvox / nvox3D;
	int numk = c;
	if ((c != 6) && (c != 18) && (c != 26)) {
		fprintf(stderr, "suitable values for c are 6, 18 or 26\n");
		numk = 6;
	}
	//set up kernel to search for neighbors: linear offset plus the x and y
	//deltas needed to reject neighbors that wrap across row/column boundaries
	int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x delta, y delta
	int mxDx = 1; //connectivity 6: faces only
	if (numk == 18)
		mxDx = 2; //connectivity 18: faces+edges
	if (numk == 26)
		mxDx = 3; //connectivity 26: faces+edges+corners
	int j = 0;
	for (int kz = -1; kz <= 1; kz++)
		for (int ky = -1; ky <= 1; ky++)
			for (int kx = -1; kx <= 1; kx++) {
				int dist = abs(kx) + abs(ky) + abs(kz);
				if ((dist > mxDx) || (dist == 0))
					continue;
				k[j] = kx + (ky * nim->nx) + (kz * nim->nx * nim->ny);
				k[j + numk] = kx; //avoid left-right wrap
				k[j + numk + numk] = ky; //BUG FIX: was kx — the y delta is needed to detect anterior-posterior wrap
				j++;
			} //for kx
	//omp notes: here we compute each volume independently.
	// Christian Gaser computes the step loop in parallel, which accelerates 3D cases
	// This code is very quick on 3D, so this does not seem crucial, and avoids critical sections
	#pragma omp parallel for
	for (int vol = 0; vol < nvol; vol++) {
		//identify clusters
		flt *inimg = (flt *)nim->data;
		inimg += vol * nvox3D;
		flt mx = (inimg[0]);
		for (size_t i = 0; i < nvox3D; i++)
			mx = MAX((inimg[i]), mx);
		double dh = mx / 100.0; //integrate TFCE over 100 height steps
		flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image
		int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed
		uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64); //1 = survives threshold, not yet clustered
		for (int i = 0; i < nvox3D; i++)
			outimg[i] = 0.0;
		int n_steps = (int)ceil(mx / dh);
		for (int step = 0; step < n_steps; step++) {
			flt thresh = (step + 1) * dh;
			memset(vxs, 0, nvox3D * sizeof(uint8_t));
			for (int i = 0; i < nvox3D; i++)
				if (inimg[i] >= thresh)
					vxs[i] = 1; //survives, unclustered
			int i = 0;
			for (int z = 0; z < nim->nz; z++)
				for (int y = 0; y < nim->ny; y++)
					for (int x = 0; x < nim->nx; x++) {
						if (vxs[i] == 0) {
							i++;
							continue;
						} //voxel did not survive or already clustered
						//flood-fill the cluster containing voxel i
						int qlo = 0;
						int qhi = 0;
						q[qhi] = i; //add starting voxel as seed in queue
						vxs[i] = 0; //do not find again!
						while (qhi >= qlo) { //first in, first out queue
							//retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity)
							for (int j = 0; j < numk; j++) {
								int jj = q[qlo] + k[j];
								if ((jj < 0) || (jj >= nvox3D))
									continue; //voxel in volume
								if (vxs[jj] == 0)
									continue; //already found or did not survive threshold
								int dx = x + k[j + numk];
								if ((dx < 0) || (dx >= nim->nx))
									continue; //wrapped left-right
								int dy = y + k[j + numk + numk];
								if ((dy < 0) || (dy >= nim->ny))
									continue; //wrapped anterior-posterior
								//NOTE(review): wrap test uses the cluster start's x/y for every queued voxel — approximate; confirm intent
								//add new seed:
								vxs[jj] = 0; //do not find again!
								qhi++;
								q[qhi] = jj;
							}
							qlo++;
						} //while qhi >= qlo: continue until all seeds tested
						flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1
						for (int j = 0; j <= qhi; j++)
							outimg[q[j]] += valToAdd;
						i++;
					} //for each voxel
		} //for each step
		for (int i = 0; i < nvox3D; i++)
			inimg[i] = outimg[i];
		_mm_free(q);
		_mm_free(vxs);
		_mm_free(outimg);
	}
	_mm_free(k);
	return 0;
} //nifti_tfce()
static int nifti_grid(nifti_image *nim, double v, int spacing) {
	//Burn a grid into every 2D slice: set value v on every column and row whose
	//index is a multiple of spacing; for 3D images, every spacing-th slice is
	//filled entirely.
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	size_t sliceVox = (nim->nx * nim->ny); //voxels per 2D slice
	size_t nSlices = nim->nvox / sliceVox; //slices across space and time
	flt *f32 = (flt *)nim->data;
	flt gridVal = v;
	#pragma omp parallel for
	for (size_t s = 0; s < nSlices; s++) {
		flt *slice = f32 + (s * sliceVox);
		int z = (s % nim->nz);
		if ((nim->nz > 1) && ((z % spacing) == 0)) {
			//this entire slice lies on the grid
			for (size_t j = 0; j < sliceVox; j++)
				slice[j] = gridVal;
			continue;
		}
		//vertical and horizontal grid lines in one pass
		size_t j = 0;
		for (size_t y = 0; y < nim->ny; y++)
			for (size_t x = 0; x < nim->nx; x++) {
				if (((x % spacing) == 0) || ((y % spacing) == 0))
					slice[j] = gridVal;
				j++;
			}
	} //for s: each 2D slice
	return 0;
}
static int nifti_rem(nifti_image *nim, double v, int isFrac) {
	//remainder (modulo) : fslmaths
	//isFrac keeps the fractional remainder; otherwise it is truncated toward zero.
	/*fmod(0.45, 2) = 0.45 : 0
	fmod(0.9, 2) = 0.9 : 0
	fmod(1.35, 2) = 1.35 : 1
	fmod(1.8, 2) = 1.8 : 1
	fmod(-0.45, 2) = -0.45 : 0
	fmod(-0.9, 2) = -0.9 : 0
	fmod(-1.35, 2) = -1.35 : -1
	fmod(-1.8, 2) = -1.8 : -1
	*/
	if (nim->datatype != DT_CALC)
		return 1;
	if (nim->nvox < 1)
		return 1;
	if (v == 0.0) {
		fprintf(stderr, "Exception: '-rem 0' does not make sense\n");
		return 1;
	}
	flt divisor = v;
	flt *f32 = (flt *)nim->data;
	for (size_t i = 0; i < nim->nvox; i++) {
		flt r = fmod(f32[i], divisor);
		f32[i] = isFrac ? r : trunc(r);
	}
	return 0;
}
static int nifti_thr(nifti_image *nim, double v, int modifyBrightVoxels, float newIntensity) {
	//Threshold: replace voxels below v (or above v when modifyBrightVoxels is
	//set) with newIntensity. Returns 0 on success, 1 on unsupported datatype.
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC) {
		fprintf(stderr, "nifti_thr: Unsupported datatype %d\n", nim->datatype);
		return 1;
	}
	flt thresh = v;
	flt *f32 = (flt *)nim->data;
	for (size_t i = 0; i < nim->nvox; i++) {
		int beyond = modifyBrightVoxels ? (f32[i] > thresh) : (f32[i] < thresh);
		if (beyond)
			f32[i] = newIntensity;
	}
	return 0;
} // nifti_thr()
static int nifti_max(nifti_image *nim, double v, int useMin) {
	//Clamp every voxel against v: fmax (floor at v) by default, fmin (ceiling
	//at v) when useMin is set. Returns 0 on success, 1 on unsupported datatype.
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC) {
		fprintf(stderr, "nifti_max: Unsupported datatype %d\n", nim->datatype);
		return 1;
	}
	flt bound = v;
	flt *f32 = (flt *)nim->data;
	for (size_t i = 0; i < nim->nvox; i++)
		f32[i] = useMin ? fmin(f32[i], bound) : fmax(f32[i], bound);
	return 0;
} // nifti_max()
static int nifti_inm(nifti_image *nim, double M) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
	//With '-inm <value>', every voxel in the input volume is multiplied by <value> / M
	// where M is the mean across all voxels.
	//n.b.: regardless of description, mean appears to only include voxels > 0
	//Each 3D volume is normalized independently.
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0))
		return 1;
	int nvol = nim->nvox / nvox3D;
	flt *f32 = (flt *)nim->data;
#define gt0
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt *volImg = f32 + ((size_t)v * nvox3D);
		//mean of the strictly positive voxels in this volume
		double sum = 0.0;
		int n = 0;
		for (size_t i = 0; i < nvox3D; i++) {
			if (volImg[i] > 0.0f) {
				n++;
				sum += volImg[i];
			}
		}
		if (sum == 0.0)
			continue; //no positive voxels: leave this volume unchanged
		flt scale = M / (sum / n);
		for (int i = 0; i < nvox3D; i++)
			volImg[i] *= scale;
	}
	return 0;
} // nifti_inm()
static int nifti_ing(nifti_image *nim, double M) {
	//Global intensity normalization: scale the WHOLE image so the mean of its
	//positive voxels equals M (contrast with nifti_inm, which is per-volume).
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
	//n.b.: regardless of description, mean appears to only include voxels > 0
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *f32 = (flt *)nim->data;
	double sum = 0.0;
	size_t n = 0;
	for (size_t i = 0; i < nim->nvox; i++) {
		if (f32[i] > 0.0f) {
			n++;
			sum += f32[i];
		}
	}
	if (sum == 0)
		return 0; //no positive voxels: leave image unchanged
	double ave = sum / (double)n;
	flt scale = M / ave;
	//BUG FIX: the loop index was int, which truncates (and makes the
	//signed/unsigned comparison unreliable) for images with more than
	//INT_MAX voxels; int64_t keeps OpenMP's signed-index requirement.
	#pragma omp parallel for
	for (int64_t i = 0; i < (int64_t)nim->nvox; i++)
		f32[i] *= scale;
	return 0;
} //nifti_ing()
// nifti_robust_range: estimate the "robust range" (~2nd and 98th percentiles)
// of a DT_CALC image via a 1001-bin histogram, mirroring FSL behavior.
// NaN voxels are always excluded; zero voxels are excluded when
// ignoreZeroVoxels is set. Flat images, tiny images (<100 usable voxels) or
// degenerate percentile counts fall back to the full min..max range.
// Results are written to *pct2 / *pct98; returns 0 on success, 1 on error.
static int nifti_robust_range(nifti_image *nim, flt *pct2, flt *pct98, int ignoreZeroVoxels) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;31f309c1.1307
	// robust range is essentially the 2nd and 98th percentiles
	// "but ensuring that the majority of the intensity range is captured, even for binary images."
	// fsl uses 1000 bins, also limits for volumes less than 100 voxels taylor.hanayik@ndcn.ox.ac.uk 20190107
	//fslstats trick -r
	// 0.000000 1129.141968
	//niimath >fslstats trick -R
	// 0.000000 2734.000000
	*pct2 = 0.0;
	*pct98 = 1.0;
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *f32 = (flt *)nim->data;
	flt mn = INFINITY;
	flt mx = -INFINITY;
	size_t nZero = 0;
	size_t nNan = 0;
	//pass 1: min/max plus zero/NaN bookkeeping
	for (size_t i = 0; i < nim->nvox; i++) {
		if (isnan(f32[i])) {
			nNan++;
			continue;
		}
		if (f32[i] == 0.0) {
			nZero++;
			if (ignoreZeroVoxels)
				continue;
		}
		mn = fmin(f32[i], mn);
		mx = fmax(f32[i], mx);
	}
	if ((nZero > 0) && (mn > 0.0) && (!ignoreZeroVoxels))
		mn = 0.0;
	if (mn > mx)
		return 0; //all NaN
	if (mn == mx) {
		*pct2 = mn;
		*pct98 = mx;
		return 0;
	}
	if (!ignoreZeroVoxels)
		nZero = 0;
	nZero += nNan;
	size_t n2pct = round((nim->nvox - nZero) * 0.02);
	if ((n2pct < 1) || (mn == mx) || ((nim->nvox - nZero) < 100)) { //T Hanayik mentioned issue with very small volumes
		*pct2 = mn;
		*pct98 = mx;
		return 0;
	}
#define nBins 1001
	flt scl = (nBins - 1) / (mx - mn);
	int hist[nBins];
	for (int i = 0; i < nBins; i++)
		hist[i] = 0;
	//pass 2: fill the histogram (two loops so the zero test is hoisted out of the common path)
	if (ignoreZeroVoxels) {
		for (size_t i = 0; i < nim->nvox; i++) { //size_t (was int): nvox can exceed INT_MAX
			if (isnan(f32[i]))
				continue;
			if (f32[i] == 0.0)
				continue;
			hist[(int)round((f32[i] - mn) * scl)]++;
		}
	} else {
		for (size_t i = 0; i < nim->nvox; i++) { //size_t (was int): nvox can exceed INT_MAX
			if (isnan(f32[i]))
				continue;
			hist[(int)round((f32[i] - mn) * scl)]++;
		}
	}
	//walk up from the bottom until 2% of usable voxels are consumed
	size_t n = 0;
	size_t lo = 0;
	while (n < n2pct) {
		n += hist[lo];
		lo++;
	}
	lo--; //remove final increment
	//walk down from the top for the 98th percentile
	n = 0;
	int hi = nBins;
	while (n < n2pct) {
		hi--;
		n += hist[hi];
	}
	if (lo == hi) { //MAJORITY are not black or white: widen to nearest occupied bins
		int ok = -1;
		while (ok != 0) {
			if (lo > 0) {
				lo--;
				if (hist[lo] > 0)
					ok = 0;
			}
			if ((ok != 0) && (hi < (nBins - 1))) {
				hi++;
				if (hist[hi] > 0)
					ok = 0;
			}
			if ((lo == 0) && (hi == (nBins - 1)))
				ok = 0;
		} //while not ok
	} //if lo == hi
	*pct2 = (lo) / scl + mn;
	*pct98 = (hi) / scl + mn;
	//NOTE(review): unconditional stdout report — confirm this is wanted outside verbose/debug runs
	printf("full range %g..%g (voxels 0 or NaN =%zu) robust range %g..%g\n", mn, mx, nZero, *pct2, *pct98);
	return 0;
}
// eDimReduceOp: reduction operators for nifti_dim_reduce() (fslmaths -Tmean,
// -Tstd, etc., plus the X/Y/Z variants selected via the 'dim' argument).
enum eDimReduceOp { Tmean, // arithmetic mean across the reduced dimension
	Tstd,    // sample standard deviation (n-1 denominator; see nifti_dim_reduce)
	Tmax,    // maximum intensity
	Tmaxn,   // 0-based index of the maximum intensity
	Tmin,    // minimum intensity
	Tmedian, // median (a single representative ranked element, not an average of two)
	Tperc,   // caller-supplied percentile (nifti_dim_reduce 'percentage' argument)
	Tar1 };  // lag-1 temporal autocorrelation of the demeaned timecourse
// qsort comparator for flt values: ascending order without subtraction
// (subtracting floats and casting to int would be unsafe).
static int compare(const void *a, const void *b) {
	const flt lhs = *(const flt *)a;
	const flt rhs = *(const flt *)b;
	if (lhs < rhs)
		return -1;
	if (lhs > rhs)
		return 1;
	return 0;
}
// dtrend: in-place linear detrend of npt samples in xx.
// pt0 selects how the fitted line is anchored before subtraction:
//   0 = subtract the full least-squares fit (result is zero-mean)
//   1 = force the intercept through xx[0] (first point maps to zero)
//   2 = force the line through the final point (final point maps to zero)
// No-op for npt < 2 or NULL input.
static void dtrend(flt *xx, int npt, int pt0) {
	//linear detrend, first point is set to zero
	// if pt0=0 then mean is zero, pt0=1 then first point is zero, if pt0=2 final point is zero
	double t1, t3, t10, x0, x1;
	int ii;
	if (npt < 2 || xx == NULL)
		return;
	x0 = xx[0];
	x1 = 0.0;
	for (ii = 1; ii < npt; ii++) {
		x0 += xx[ii]; //x0 accumulates the plain sum of samples
		x1 += xx[ii] * ii; //x1 accumulates the index-weighted sum
	}
	t1 = npt * x0;
	t3 = 1.0 / npt;
	t10 = npt * npt;
	//closed-form least-squares line fit: f0 = intercept, f1 = slope
	double f0 = (double)(2.0 / (npt + 1.0) * t3 * (2.0 * t1 - 3.0 * x1 - x0));
	double f1 = (double)(-6.0 / (t10 - 1.0) * t3 * (-x0 - 2.0 * x1 + t1));
	//printf("%.8g %.8g %g\n", f0, f1, xx[0]);
	if (pt0 == 1)
		f0 = xx[0]; //re-anchor intercept at the first sample
	if (pt0 == 2)
		f0 = xx[npt - 1] - (f1 * (npt - 1)); //re-anchor at the final sample
	for (ii = 0; ii < npt; ii++)
		xx[ii] -= (f0 + f1 * ii);
}
// nifti_detrend_linear: remove the linear trend from every voxel timecourse
// of a 4D DT_CALC image (delegating the per-timecourse fit to dtrend()).
// Returns 0 on success, 1 on unsupported input.
static int nifti_detrend_linear(nifti_image *nim) {
	if (nim->datatype != DT_CALC)
		return 1;
	size_t voxPerVol = nim->nx * nim->ny * MAX(1, nim->nz);
	if (voxPerVol < 1)
		return 1;
	int nVols = nim->nvox / voxPerVol;
	if ((voxPerVol * nVols) != nim->nvox)
		return 1; //voxel count is not a whole number of volumes
	if (nVols < 2) {
		fprintf(stderr, "detrend requires a 4D image with at least three volumes\n");
		return 1;
	}
	flt *img = (flt *)nim->data;
#pragma omp parallel for
	for (size_t vx = 0; vx < voxPerVol; vx++) {
		flt *timecourse = (flt *)_mm_malloc(nVols * sizeof(flt), 64);
		//gather this voxel's values across all timepoints
		int t = 0;
		for (size_t pos = vx; pos < nim->nvox; pos += voxPerVol)
			timecourse[t++] = img[pos];
		dtrend(timecourse, nVols, 0); //zero-mean linear detrend
		//scatter the detrended values back into the 4D array
		t = 0;
		for (size_t pos = vx; pos < nim->nvox; pos += voxPerVol)
			img[pos] = timecourse[t++];
		_mm_free(timecourse);
	}
	return 0;
}
#ifdef bandpass
//https://github.com/QtSignalProcessing/QtSignalProcessing/blob/master/src/iir.cpp
//https://github.com/rkuchumov/day_plot_diagrams/blob/8df48af431dc76b1656a627f1965d83e8693ddd7/data.c
//https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
// Sample rate and desired cutoff frequencies (in Hz).
// double highcut = 1250;
// double lowcut = 500;
// double samp_rate = 5000;
//[b,a] = butter(2, [0.009, 0.08]);
//https://afni.nimh.nih.gov/afni/community/board/read.php?1,84373,137180#msg-137180
//Power 2011, Satterthwaite 2013, Carp 2011, Power's reply to Carp 2012
// https://github.com/lindenmp/rs-fMRI/blob/master/func/ButterFilt.m
//https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
/*
The function butterworth_filter() emulates Jan Simon's FiltFiltM
it uses Gustafsson’s method and padding to reduce ringing at start/end
https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm?focused=5193423&tab=function
Copyright (c) 2011, Jan Simon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.*/
// butterworth_filter: zero-phase (forward + reverse, "filtfilt") Butterworth
// temporal filter applied independently to every voxel timecourse.
//   img      4D data laid out as nvol volumes of nvox3D voxels
//   fs       sample rate in Hz
//   highcut  low-pass corner frequency in Hz (<= 0 disables)
//   lowcut   high-pass corner frequency in Hz (<= 0 disables)
// Returns 0 on success, 1 on bad parameters or too few timepoints.
// Emulates Jan Simon's FiltFiltM: mirror-padded edges and initial conditions
// estimated before each pass to suppress start/end ringing (see license above).
static int butterworth_filter(flt *img, int nvox3D, int nvol, double fs, double highcut, double lowcut) {
	//sample rate, low cut and high cut are all in Hz
	//this attempts to emulate performance of https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm
	// specifically, prior to the forward and reverse pass the coefficients are estimated by a forward and reverse pass
	int order = 2;
	if (order <= 0)
		return 1;
	if ((highcut <= 0.0) && (lowcut <= 0.0))
		return 1;
	if (fs <= 0.0)
		return 1;
	//report the effective filter configuration (each pass doubles the order)
	if ((lowcut > 0.0) && (highcut > 0.0))
		printf("butter bandpass lowcut=%g highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, highcut, fs, order, 2 * order);
	else if (highcut > 0.0)
		printf("butter lowpass highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", highcut, fs, order, 2 * order);
	else if (lowcut > 0.0)
		printf("butter highpass lowcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, fs, order, 2 * order);
	else {
		printf("Butterworth parameters do not make sense\n");
		return 1;
	}
	double *a;
	double *b;
	double *IC; //per-sample initial conditions from butter_design
	int nX = nvol;
	int nA = 0;
	nA = butter_design(order, 2.0 * lowcut / fs, 2.0 * highcut / fs, &a, &b, &IC); //frequencies normalized to Nyquist
	int nEdge = 3 * (nA - 1); //length of the mirrored padding at each end
	if ((nA < 1) || (nX <= nEdge)) {
		printf("filter requires at least %d samples\n", nEdge);
		_mm_free(a);
		_mm_free(b);
		_mm_free(IC);
		return 1;
	}
#pragma omp parallel for
	for (int vx = 0; vx < nvox3D; vx++) {
		double *X = (double *)_mm_malloc(nX * sizeof(double), 64);
		size_t vo = vx;
		flt mn = INFINITY;
		flt mx = -INFINITY;
		//load one voxel's timecourse, tracking its range
		for (int j = 0; j < nX; j++) {
			X[j] = img[vo];
			mn = MIN(mn, X[j]);
			mx = MAX(mx, X[j]);
			vo += nvox3D;
		}
		if (mn < mx) { //some variability
			//Xi: mirror-reflected pre-padding of the first samples
			double *Xi = (double *)_mm_malloc(nEdge * sizeof(double), 64);
			for (int i = 0; i < nEdge; i++)
				Xi[nEdge - i - 1] = X[0] - (X[i + 1] - X[0]);
			//CC: filter state seeded from the padded start value
			double *CC = (double *)_mm_malloc((nA - 1) * sizeof(double), 64);
			for (int i = 0; i < (nA - 1); i++)
				CC[i] = IC[i] * Xi[0];
			//Xf: mirror-reflected post-padding of the last samples
			double *Xf = (double *)_mm_malloc(nEdge * sizeof(double), 64);
			for (int i = 0; i < nEdge; i++)
				Xf[i] = X[nX - 1] - (X[nX - 2 - i] - X[nX - 1]);
			//forward pass: CC carries state head -> body -> tail (order matters)
			Filt(Xi, nEdge, a, b, nA - 1, CC); //filter head
			Filt(X, nX, a, b, nA - 1, CC); //filter array
			Filt(Xf, nEdge, a, b, nA - 1, CC); //filter tail
			//reverse pass: re-seed state from the filtered tail, run backwards
			for (int i = 0; i < (nA - 1); i++)
				CC[i] = IC[i] * Xf[nEdge - 1];
			FiltRev(Xf, nEdge, a, b, nA - 1, CC); //filter tail
			FiltRev(X, nX, a, b, nA - 1, CC); //filter array
			_mm_free(Xi);
			_mm_free(Xf);
			_mm_free(CC);
		} else { //else no variability: set all voxels to zero
			for (int j = 0; j < nX; j++)
				X[j] = 0;
		}
		//save data to 4D array
		vo = vx;
		for (int j = 0; j < nX; j++) {
			img[vo] = X[j];
			vo += nvox3D;
		}
		_mm_free(X);
	} //for vx
	_mm_free(b);
	_mm_free(a);
	_mm_free(IC);
	return 0;
}
// nifti_bandpass: temporal Butterworth filter driver. Resolves the sampling
// period (falling back to the header's pixdim[4] when TRsec <= 0), checks the
// image is a well-formed 4D DT_CALC dataset, then hands the raw voxel data to
// butterworth_filter(). Returns 0 on success, 1 on error.
static int nifti_bandpass(nifti_image *nim, double hp_hz, double lp_hz, double TRsec) {
	if (nim->datatype != DT_CALC)
		return 1;
	size_t voxPerVol = nim->nx * nim->ny * MAX(1, nim->nz);
	if (TRsec <= 0.0)
		TRsec = nim->pixdim[4]; //fall back to the header's repetition time
	if (TRsec <= 0) {
		fprintf(stderr, "Unable to determine sample rate\n");
		return 1;
	}
	if (voxPerVol < 1)
		return 1;
	int nVols = nim->nvox / voxPerVol;
	if ((voxPerVol * nVols) != nim->nvox)
		return 1; //voxel count is not a whole number of volumes
	if (nVols < 1) {
		fprintf(stderr, "bandpass requires 4D datasets\n");
		return 1;
	}
	return butterworth_filter((flt *)nim->data, voxPerVol, nVols, 1 / TRsec, hp_hz, lp_hz);
}
#endif
//#define DEBUG_ENABLED
#ifdef DEBUG_ENABLED
// xyzt2txyz: transpose a 4D image in place from XYZT (volume-major) to TXYZ
// (time-major) voxel order, so each voxel's timecourse becomes contiguous.
// Returns 0 on success, 1 on unsupported input (needs >= 2 timepoints).
static int xyzt2txyz(nifti_image *nim) {
	size_t nxyz = nim->nx * nim->ny * nim->nz;
	size_t nt = nim->nt;
	if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *img = (flt *)nim->data;
	flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //scratch copy of the input
	if (inimg == NULL)
		return 1;
	memcpy(inimg, img, nim->nvox * sizeof(flt));
#pragma omp parallel for
	for (size_t x = 0; x < nxyz; x++) {
		//BUG FIX: the output index was a single counter declared OUTSIDE the
		//parallel loop and incremented inside it — a shared-variable data race
		//under OpenMP. Derive the index from x instead so it is per-iteration.
		size_t i = x * nt;
		for (size_t t = 0; t < nt; t++) {
			img[i] = inimg[x + t * nxyz];
			i++;
		}
	}
	_mm_free(inimg);
	return 0;
}
// txyz2xyzt: inverse of xyzt2txyz — transpose a 4D image in place from TXYZ
// (time-major) back to the standard XYZT (volume-major) voxel order.
// Returns 0 on success, 1 on unsupported input (needs >= 2 timepoints).
static int txyz2xyzt(nifti_image *nim) {
	size_t nxyz = nim->nx * nim->ny * nim->nz;
	size_t nt = nim->nt;
	if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *img = (flt *)nim->data;
	flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //scratch copy of the input
	if (inimg == NULL)
		return 1;
	memcpy(inimg, img, nim->nvox * sizeof(flt));
#pragma omp parallel for
	for (size_t x = 0; x < nxyz; x++) {
		//BUG FIX: the input index was a single counter declared OUTSIDE the
		//parallel loop and incremented inside it — a shared-variable data race
		//under OpenMP. Derive the index from x instead so it is per-iteration.
		size_t i = x * nt;
		for (size_t t = 0; t < nt; t++) {
			img[x + t * nxyz] = inimg[i];
			i++;
		}
	}
	_mm_free(inimg);
	return 0;
}
// nifti_bptf: fslmaths-style Gaussian temporal band-pass filter (-bptf).
//   hp_sigma  high-pass sigma in TRs (<= 0 disables the high-pass)
//   lp_sigma  low-pass sigma in TRs (<= 0 disables the low-pass)
//   demean    subtract the timecourse mean after high-pass (FSL >= 5.0.7)
// High-pass: subtracts a Gaussian-weighted local linear fit at each timepoint.
// Low-pass: Gaussian blur along time. Low-pass runs AFTER high-pass.
// This DEBUG_ENABLED variant transposes to time-major order first
// (xyzt2txyz) so each timecourse is contiguous, and transposes back at the end.
// Returns 0 on success, 1 on unsupported input.
static int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) {
	//Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m
	//5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1
	// http://www.fast.u-psud.fr/ezyfit/html/ezfit.html
	//gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2))
	// regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight
	if (nim->datatype != DT_CALC)
		return 1;
	if ((hp_sigma <= 0) && (lp_sigma <= 0))
		return 0;
	size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
	if (nvox3D < 1)
		return 1;
	int nvol = nim->nvox / nvox3D;
	if ((nvox3D * nvol) != nim->nvox)
		return 1;
	if (nvol < 1) {
		fprintf(stderr, "bptf requires 4D datasets\n");
		return 1;
	}
	int *hpStart, *hpEnd;
	double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0;
	if (hp_sigma > 0) { //initialize high-pass reusables
		//Spielberg's code uses 8*sigma, does not match current fslmaths:
		//tested with fslmaths freq4d -bptf 10 -1 nhp
		//cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412
		int cutoffhp = ceil(3 * hp_sigma); //to do: check this! ~3
		hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp
		hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp
		for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel
			hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma)));
		//per-timepoint precomputed regression terms (identical for every voxel)
		hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
		hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
		hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64); //
		hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2
		hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
		for (int v = 0; v < nvol; v++) {
			//linear regression with "gauss" fitting
			hpStart[v] = MAX(0, v - cutoffhp); //kernel clipped at series boundaries
			hpEnd[v] = MIN(nvol - 1, v + cutoffhp);
			double sumX = 0.0;
			double sumX2 = 0.0;
			double sumWt = 0.0;
			for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
				int x = k - v;
				double wt = hp0[x]; //kernel weight
				sumX += wt * x;
				sumX2 += wt * x * x;
				sumWt += wt;
			}
			hpSumX[v] = sumX;
			hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2
			if (hpDenom[v] == 0.0)
				hpDenom[v] = 1.0; //should never happen, x is known index
			hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later
			hpSumWt[v] = sumWt;
		} //for each volume
	} //high-pass reusables
	//low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp
	int *lpStart, *lpEnd;
	double *lpSumWt, *lp, *lp0;
	if (lp_sigma > 0) { //initialize low-pass reusables
		//simple Gaussian blur in time domain
		//freq4d -bptf -1 5 flp
		// fslmaths rest -bptf -1 5 flp
		// 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical
		// Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive
		int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6
		lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp
		lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp
		for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel
			lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma)));
		lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
		lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
		lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
		for (int v = 0; v < nvol; v++) {
			lpStart[v] = MAX(0, v - cutofflp);
			lpEnd[v] = MIN(nvol - 1, v + cutofflp);
			double sumWt = 0.0;
			for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
				sumWt += lp0[k - v]; //kernel weight
			if (sumWt == 0.0)
				sumWt = 1.0; //will never happen
			lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later
		} //for each volume
	} //low-pass reusables
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902
	//if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1"
	//The 'cutoff' is defined as the FWHM of the filter, so if you ask for
	//100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs.
	// -bptf <hp_sigma> <lp_sigma>
	xyzt2txyz(nim); //make each timecourse contiguous in memory
	flt *img = (flt *)nim->data;
#pragma omp parallel for
	for (size_t i = 0; i < nvox3D; i++) {
		//read input data
		flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
		flt *imgOut = img + (i * nvol); //output written in place (time-major layout)
		memcpy(imgIn, imgOut, nvol * sizeof(flt));
		if (hp_sigma > 0) {
			double sumOut = 0.0;
			for (int v = 0; v < nvol; v++) { //each volume
				//weighted linear fit centered on v; only the intercept is removed
				double sumY = 0.0;
				double sumXY = 0.0;
				for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
					int x = k - v;
					double wt = hp0[x];
					flt y = imgIn[k];
					sumY += wt * y;
					sumXY += wt * x * y;
				}
				double n = hpSumWt[v];
				double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope
				double b = (sumY - (m * hpSumX[v])) / n; //intercept
				imgOut[v] = imgIn[v] - b;
				sumOut += imgOut[v];
			} //for each volume
			//"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass
			double mean = sumOut / (double)nvol; //de-mean AFTER high-pass
			if (demean) {
				for (int v = 0; v < nvol; v++) //each volume
					imgOut[v] -= mean;
			}
		} //hp_sigma > 0
		if (lp_sigma > 0) { //low pass does not de-mean data
			//if BOTH low-pass and high-pass, apply low pass AFTER high pass:
			// fslmaths freq4d -bptf 45 5 fbp
			// difference 1.86265e-08
			//still room for improvement:
			// fslmaths /Users/chris/src/rest -bptf 45 5 fbp
			// r=1.0 identical voxels 73% max difference 0.000488281
			if (hp_sigma > 0)
				memcpy(imgIn, imgOut, nvol * sizeof(flt)); //low-pass consumes the high-passed data
			for (int v = 0; v < nvol; v++) { //each volume
				double sum = 0.0;
				for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
					sum += imgIn[k] * lp0[k - v];
				imgOut[v] = sum * lpSumWt[v];
			} // for each volume
		} //lp_sigma > 0
		_mm_free(imgIn);
	}
	txyz2xyzt(nim); //restore standard volume-major order
	if (hp_sigma > 0) { //free high-pass reuseables
		_mm_free(hp);
		_mm_free(hpStart);
		_mm_free(hpEnd);
		_mm_free(hpSumX);
		_mm_free(hpDenom);
		_mm_free(hpSumWt);
	}
	if (lp_sigma > 0) { //free low-pass reuseables
		_mm_free(lp);
		_mm_free(lpStart);
		_mm_free(lpEnd);
		_mm_free(lpSumWt);
	}
	return 0;
} // nifti_bptf()
#else
// nifti_bptf: fslmaths-style Gaussian temporal band-pass filter (-bptf).
//   hp_sigma  high-pass sigma in TRs (<= 0 disables the high-pass)
//   lp_sigma  low-pass sigma in TRs (<= 0 disables the low-pass)
//   demean    subtract the timecourse mean after high-pass (FSL >= 5.0.7)
// High-pass: subtracts a Gaussian-weighted local linear fit at each timepoint.
// Low-pass: Gaussian blur along time. Low-pass runs AFTER high-pass.
// This (default) variant keeps the data in XYZT order and gathers/scatters
// each strided timecourse into per-thread scratch buffers.
// Returns 0 on success, 1 on unsupported input.
static int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) {
	//Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m
	//5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1
	// http://www.fast.u-psud.fr/ezyfit/html/ezfit.html
	//gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2))
	// regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight
	if (nim->datatype != DT_CALC)
		return 1;
	if ((hp_sigma <= 0) && (lp_sigma <= 0))
		return 0;
	size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
	if (nvox3D < 1)
		return 1;
	int nvol = nim->nvox / nvox3D;
	if ((nvox3D * nvol) != nim->nvox)
		return 1;
	if (nvol < 1) {
		fprintf(stderr, "bptf requires 4D datasets\n");
		return 1;
	}
	int *hpStart, *hpEnd;
	double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0;
	if (hp_sigma > 0) { //initialize high-pass reusables
		//Spielberg's code uses 8*sigma, does not match current fslmaths:
		//tested with fslmaths freq4d -bptf 10 -1 nhp
		//cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412
		int cutoffhp = ceil(3 * hp_sigma); //to do: check this! ~3
		hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp
		hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp
		for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel
			hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma)));
		//per-timepoint precomputed regression terms (identical for every voxel)
		hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
		hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
		hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64);//
		hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2
		hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
		for (int v = 0; v < nvol; v++) {
			//linear regression with "gauss" fitting
			hpStart[v] = MAX(0, v - cutoffhp); //kernel clipped at series boundaries
			hpEnd[v] = MIN(nvol - 1, v + cutoffhp);
			double sumX = 0.0;
			double sumX2 = 0.0;
			double sumWt = 0.0;
			for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
				int x = k - v;
				double wt = hp0[x]; //kernel weight
				sumX += wt * x;
				sumX2 += wt * x * x;
				sumWt += wt;
			}
			hpSumX[v] = sumX;
			hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2
			if (hpDenom[v] == 0.0)
				hpDenom[v] = 1.0; //should never happen, x is known index
			hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later
			hpSumWt[v] = sumWt;
		} //for each volume
	} //high-pass reusables
	//low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp
	int *lpStart, *lpEnd;
	double *lpSumWt, *lp, *lp0;
	if (lp_sigma > 0) { //initialize low-pass reusables
		//simple Gaussian blur in time domain
		//freq4d -bptf -1 5 flp
		// fslmaths rest -bptf -1 5 flp
		// 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical
		// Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive
		int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6
		lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp
		lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp
		for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel
			lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma)));
		lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
		lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
		lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
		for (int v = 0; v < nvol; v++) {
			lpStart[v] = MAX(0, v - cutofflp);
			lpEnd[v] = MIN(nvol - 1, v + cutofflp);
			double sumWt = 0.0;
			for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
				sumWt += lp0[k - v]; //kernel weight
			if (sumWt == 0.0)
				sumWt = 1.0; //will never happen
			lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later
		} //for each volume
	} //low-pass reusables
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902
	//if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1"
	//The 'cutoff' is defined as the FWHM of the filter, so if you ask for
	//100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs.
	// -bptf <hp_sigma> <lp_sigma>
	flt *img = (flt *)nim->data;
#pragma omp parallel for
	for (size_t i = 0; i < nvox3D; i++) {
		//read input data: gather the strided timecourse for voxel i
		flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
		flt *imgOut = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
		int j = 0;
		for (size_t v = i; v < nim->nvox; v += nvox3D) {
			imgIn[j] = img[v];
			j++;
		}
		if (hp_sigma > 0) {
			double sumOut = 0.0;
			for (int v = 0; v < nvol; v++) { //each volume
				//weighted linear fit centered on v; only the intercept is removed
				double sumY = 0.0;
				double sumXY = 0.0;
				for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
					int x = k - v;
					double wt = hp0[x];
					flt y = imgIn[k];
					sumY += wt * y;
					sumXY += wt * x * y;
				}
				double n = hpSumWt[v];
				double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope
				double b = (sumY - (m * hpSumX[v])) / n; //intercept
				imgOut[v] = imgIn[v] - b;
				sumOut += imgOut[v];
			} //for each volume
			//"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass
			double mean = sumOut / (double)nvol; //de-mean AFTER high-pass
			if (demean) {
				for (int v = 0; v < nvol; v++) //each volume
					imgOut[v] -= mean;
			}
		} //hp_sigma > 0
		if (lp_sigma > 0) { //low pass does not de-mean data
			//if BOTH low-pass and high-pass, apply low pass AFTER high pass:
			// fslmaths freq4d -bptf 45 5 fbp
			// difference 1.86265e-08
			//still room for improvement:
			// fslmaths /Users/chris/src/rest -bptf 45 5 fbp
			// r=1.0 identical voxels 73% max difference 0.000488281
			if (hp_sigma > 0)
				memcpy(imgIn, imgOut, nvol * sizeof(flt)); //low-pass consumes the high-passed data
			for (int v = 0; v < nvol; v++) { //each volume
				double sum = 0.0;
				for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
					sum += imgIn[k] * lp0[k - v];
				imgOut[v] = sum * lpSumWt[v];
			} // for each volume
		} //lp_sigma > 0
		//write filtered data: scatter back to the strided layout
		j = 0;
		for (size_t v = i; v < nim->nvox; v += nvox3D) {
			img[v] = imgOut[j];
			j++;
		}
		_mm_free(imgIn);
		_mm_free(imgOut);
	}
	if (hp_sigma > 0) { //free high-pass reuseables
		_mm_free(hp);
		_mm_free(hpStart);
		_mm_free(hpEnd);
		_mm_free(hpSumX);
		_mm_free(hpDenom);
		_mm_free(hpSumWt);
	}
	if (lp_sigma > 0) { //free low-pass reuseables
		_mm_free(lp);
		_mm_free(lpStart);
		_mm_free(lpEnd);
		_mm_free(lpSumWt);
	}
	return 0;
} // nifti_bptf()
#endif
// nifti_demean: subtract each voxel's temporal mean so every timecourse of a
// 4D DT_CALC image ends up zero-mean. Returns 0 on success, 1 on error.
static int nifti_demean(nifti_image *nim) {
	if (nim->datatype != DT_CALC)
		return 1;
	size_t voxPerVol = nim->nx * nim->ny * MAX(1, nim->nz);
	if (voxPerVol < 1)
		return 1;
	int nVols = nim->nvox / voxPerVol;
	if ((voxPerVol * nVols) != nim->nvox)
		return 1; //voxel count is not a whole number of volumes
	if (nVols < 1) {
		fprintf(stderr, "demean requires 4D datasets\n");
		return 1;
	}
	flt *vox = (flt *)nim->data;
#pragma omp parallel for
	for (size_t vx = 0; vx < voxPerVol; vx++) {
		double total = 0.0;
		for (size_t pos = vx; pos < nim->nvox; pos += voxPerVol)
			total += vox[pos]; //accumulate in double for precision
		double avg = total / nVols;
		for (size_t pos = vx; pos < nim->nvox; pos += voxPerVol)
			vox[pos] -= avg;
	}
	return 0;
}
// nifti_dim_reduce: collapse one dimension (dim: 1=X, 2=Y, 3=Z, 4=T) of a
// DT_CALC image using the reduction operator op (see enum eDimReduceOp),
// e.g. nifti_dim_reduce(nim, Tmean, 4, 0) implements fslmaths -Tmean.
// 'percentage' (0..100) is used only by Tperc. On success the header
// dimensions and nvox are updated and nim->data is replaced with the reduced
// volume. Returns 0 on success (including nothing-to-do), 1 on error.
static int nifti_dim_reduce(nifti_image *nim, enum eDimReduceOp op, int dim, int percentage) {
	//e.g. nifti_dim_reduce(nim, Tmean, 4) reduces 4th dimension, saving mean
	int nReduce = nim->dim[dim];
	if ((nReduce <= 1) || (dim < 1) || (dim > 4))
		return 0; //nothing to reduce, fslmaths does not generate an error
	if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if (nim->dim[0] > 4)
		fprintf(stderr, "dimension reduction collapsing %" PRId64 "D into to 4D\n", nim->dim[0]);
	int dims[8], indims[8];
	for (int i = 0; i < 4; i++)
		dims[i] = MAX(nim->dim[i], 1);
	//XYZT limits to 4 dimensions, so collapse dims [4,5,6,7]
	dims[4] = nim->nvox / (dims[1] * dims[2] * dims[3]);
	for (int i = 5; i < 8; i++)
		dims[i] = 1;
	for (int i = 0; i < 8; i++)
		indims[i] = dims[i];
	if ((dims[1] * dims[2] * dims[3] * dims[4]) != nim->nvox)
		return 1; //e.g. data in dim 5..7!
	dims[dim] = 1;
	if (dim == 4)
		dims[0] = 3; //reduce 4D to 3D
	size_t nvox = dims[1] * dims[2] * dims[3] * dims[4]; //output voxel count
	flt *i32 = (flt *)nim->data;
	void *dat = (void *)calloc(1, nim->nvox * sizeof(flt)); //over-allocated to the input size
	if (dat == NULL)
		return 1;
	flt *o32 = (flt *)dat;
	int collapseStep; //e.g. if we collapse 4th dimension, we will collapse across voxels separated by X*Y*Z
	if (dim == 1)
		collapseStep = 1; //collapse by columns
	else if (dim == 2)
		collapseStep = indims[1]; //collapse by rows
	else if (dim == 3)
		collapseStep = indims[1] * indims[2]; //collapse by slices
	else
		collapseStep = indims[1] * indims[2] * indims[3]; //collapse by volumes
	int xy = dims[1] * dims[2];
	int xyz = xy * dims[3];
	if ((op == Tmedian) || (op == Tstd) || (op == Tperc) || (op == Tar1)) {
		//these operators need the whole collapsed vector in memory
		//for even number of items, two options for median, consider 4 volumes ranked
		// mean of 2nd and 3rd: problem one can return values not in data
		// single ranked value: representative — used here
		int itm = (nReduce * 0.5); //seems correct tested with odd and even number of volumes
		if (op == Tperc) {
			double frac = ((double)percentage) / 100.0;
			itm = ((nReduce)*frac);
			itm = MAX(itm, 0);
			itm = MIN(itm, nReduce - 1);
		}
#pragma omp parallel for
		for (size_t i = 0; i < nvox; i++) {
			flt *vxls = (flt *)_mm_malloc((nReduce) * sizeof(flt), 64);
			size_t inPos = i;
			if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
				int T = (i / xyz); //volume
				int r = i % (xyz);
				int Z = (r / xy); //slice
				r = r % (xy);
				int Y = (r / dims[1]); //row
				int X = r % dims[1];
				inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]);
			}
			for (int v = 0; v < nReduce; v++) {
				vxls[v] = i32[inPos];
				inPos += collapseStep;
			}
			if ((op == Tstd) || (op == Tar1)) {
				//computed in cache, far fewer operations than Welford
				//note 64-bit double precision even if 32-bit DT_CALC
				// double precision attenuates catastrophic cancellation
				double sum = 0.0;
				for (int v = 0; v < nReduce; v++)
					sum += vxls[v];
				double mean = sum / nReduce;
				double sumSqr = 0.0;
				for (int v = 0; v < nReduce; v++)
					sumSqr += sqr(vxls[v] - mean);
				if (op == Tstd)
					o32[i] = sqrt(sumSqr / (nReduce - 1));
				else { //Tar1
					if (sumSqr == 0.0) {
						o32[i] = 0.0;
						_mm_free(vxls); //BUG FIX: 'continue' previously skipped the free below, leaking per flat voxel
						continue;
					}
					for (int v = 0; v < nReduce; v++)
						vxls[v] = vxls[v] - mean; //demean
					double r = 0.0;
					for (int v = 1; v < nReduce; v++)
						r += (vxls[v] * vxls[v - 1]) / sumSqr;
					o32[i] = r;
				}
			} else { //Tperc or Tmedian
				qsort(vxls, nReduce, sizeof(flt), compare);
				o32[i] = vxls[itm];
			}
			_mm_free(vxls);
		} //for i: each voxel
	} else {
		//streaming operators (mean/max/maxn/min): single pass, no scratch vector
#pragma omp parallel for
		for (size_t i = 0; i < nvox; i++) {
			size_t inPos = i; //ok if dim==4
			if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
				int T = (i / xyz); //volume
				int r = i % (xyz);
				int Z = (r / xy); //slice
				r = r % (xy);
				int Y = (r / dims[1]); //row
				int X = r % dims[1];
				inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]);
			}
			double sum = 0.0;
			flt mx = i32[inPos];
			flt mn = mx;
			int mxn = 0;
			for (int v = 0; v < nReduce; v++) {
				flt f = i32[inPos];
				sum += f;
				if (f > mx) {
					mx = f;
					mxn = v;
				}
				mn = MIN(mn, f);
				inPos += collapseStep;
			}
			if (op == Tmean)
				o32[i] = sum / nReduce; //mean
			else if (op == Tmax)
				o32[i] = mx; //max
			else if (op == Tmaxn)
				o32[i] = mxn; //maxn
			else if (op == Tmin)
				o32[i] = mn; //min
		}
	} //if op
	//update the header to the reduced geometry
	nim->nvox = nvox;
	for (int i = 0; i < 4; i++)
		nim->dim[i] = dims[i];
	//NOTE(review): dim[4..7] are not written back (only nt..nw below) — confirm
	//downstream consumers read nt/nu/nv/nw rather than dim[4..7]
	nim->ndim = dims[0];
	nim->nx = dims[1];
	nim->ny = dims[2];
	nim->nz = dims[3];
	nim->nt = dims[4];
	nim->nu = dims[5];
	nim->nv = dims[6];
	nim->nw = dims[7];
	free(nim->data);
	nim->data = dat;
	return 0;
} // nifti_dim_reduce()
// eOp: operator codes for the fslmaths-style command-line verbs this tool
// dispatches on. Trailing '1' (and the X in divX) avoids identifier clashes
// with C library names such as exp, floor, log, div.
enum eOp { unknown,
	add,
	sub,
	mul,
	divX,
	rem,
	mod,
	mas,
	thr,
	thrp,
	thrP,
	uthr,
	uthrp,
	uthrP,
	clamp,
	uclamp,
	max,
	min,
	power,
	seed,
	inm,
	ing,
	smth,
	exp1,
	floor1,
	round1,
	trunc1,
	ceil1,
	log1,
	sin1,
	cos1,
	tan1,
	asin1,
	acos1,
	atan1,
	sqr1,
	sqrt1,
	recip1,
	abs1,
	bin1,
	binv1,
	edge1,
	index1,
	nan1,
	nanm1,
	rand1,
	randn1,
	range1,
	rank1,
	ranknorm1,
	pval1,
	pval01,
	cpval1,
	ztop1,
	ptoz1,
	dilMk,
	dilDk,
	dilFk,
	dilallk,
	erok,
	eroFk,
	fmediank,
	fmeank,
	fmeanuk,
	subsamp2,
	subsamp2offc
};
// make_kernel_gauss: build a 3D Gaussian smoothing kernel for image nim with
// standard deviation sigmamm (millimeters; sign is ignored).
// Returns an _mm_malloc'd array of 4*n ints laid out as four n-length planes:
//   [0..n)    linear voxel offset of each tap
//   [n..2n)   x offset (for left-right wrap detection)
//   [2n..3n)  y offset (for anterior-posterior wrap detection)
//   [3n..4n)  integer weight, scaled so all weights sum to ~INT_MAX
// *nkernel receives n. Returns NULL when sigmamm == 0. Caller frees with _mm_free.
static int *make_kernel_gauss(nifti_image *nim, int *nkernel, double sigmamm) {
	sigmamm = fabs(sigmamm);
	if (sigmamm == 0.0)
		return NULL;
	double mmCutoff = sigmamm * 6.0; //maximum extent
	//odd kernel extents (in voxels) so the kernel is centered
	int x = (2 * floor(mmCutoff / nim->dx)) + 1;
	int y = (2 * floor(mmCutoff / nim->dy)) + 1;
	int z = (2 * floor(mmCutoff / nim->dz)) + 1;
	int xlo = (int)(-x / 2);
	int ylo = (int)(-y / 2);
	int zlo = (int)(-z / 2);
	//betterthanfsl
	// fsl computes gaussian for all values in cube
	// from first principles, a spherical filter has less bias
	// since weighting is very low at these edge voxels, it has little impact on
	// "-fmean", however with other filters like "dilM", fsl's solution works like
	// a "box" filter, not a "sphere" filter
	// default is to clone fsl
#ifdef betterthanfsl //true sphere at cutouff
	//first pass: determine number of surviving voxels (n)
	int n = 0;
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				if (dist > mmCutoff)
					continue;
				n++;
			}
	*nkernel = n;
	//NOTE(review): kernelWeight is computed but only referenced in commented-out
	//code below — appears vestigial; confirm before removing
	int kernelWeight = (int)((double)INT_MAX / (double)n); //requires <limits.h>
	int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precess weight: temporary
	//second pass: fill surviving voxels
	int i = 0;
	double expd = 2.0 * sigmamm * sigmamm; //Gaussian denominator 2*sigma^2
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				if (dist > mmCutoff)
					continue;
				kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
				kernel[i + n] = xi; //left-right wrap detection
				kernel[i + n + n] = yi; //anterior-posterior wrap detection
				//kernel[i+n+n+n] = kernelWeight; //kernel height
				wt[i] = exp(-1.0 * (dist * dist) / expd);
				i++;
			}
#else
	//fsl-compatible default: keep every voxel in the bounding cube
	int n = x * y * z;
	*nkernel = n;
	int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precess weight: temporary
	int i = 0;
	double expd = 2.0 * sigmamm * sigmamm; //Gaussian denominator 2*sigma^2
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				//if (dist > mmCutoff) continue; //<- fsl fills all
				kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
				kernel[i + n] = xi; //left-right wrap detection
				kernel[i + n + n] = yi; //anterior-posterior wrap detection
				//kernel[i+n+n+n] = kernelWeight; //kernel height
				wt[i] = exp(-1.0 * (dist * dist) / expd);
				i++;
			}
#endif
	double sum = 0.0;
	for (int i = 0; i < n; i++)
		sum += wt[i];
	//sum of entire gaussian is 1
	double scale = 1.0 / sum;
	scale *= (double)INT_MAX; //we use integer scaling: in future faster to typecast integer as flt (if int=32bit) or double (if int=64bit)
	for (int i = 0; i < n; i++)
		kernel[i + n + n + n] = wt[i] * scale;
	_mm_free(wt);
	return kernel;
} //make_kernel_gauss()
static flt calmax(nifti_image *nim) {
	//Return the brightest voxel intensity of a DT_CALC image (0.0 if empty/unsupported).
	if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
		return 0.0;
	flt *img = (flt *)nim->data;
	flt brightest = img[0];
	for (size_t v = 1; v < nim->nvox; v++)
		brightest = MAX(brightest, img[v]);
	return brightest;
}
static flt calmin(nifti_image *nim) {
	//Return the darkest voxel intensity of a DT_CALC image (0.0 if empty/unsupported).
	if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
		return 0.0;
	flt *img = (flt *)nim->data;
	flt darkest = img[0];
	for (size_t v = 1; v < nim->nvox; v++)
		darkest = MIN(darkest, img[v]);
	return darkest;
}
//nifti_tensor_2: convert a 6-volume diffusion tensor image between the NIfTI
// lower-triangle ordering (xx xy yy xz yz zz) and FSL's upper-triangle
// ordering (xx xy xz yy yz zz). Only the 3rd and 4th volumes differ between
// the two conventions, so those two volumes are swapped in place and the
// header is rewritten to describe the destination convention.
// lower2upper: nonzero converts lower->upper (FSL 4D), zero converts
// upper->lower (NIfTI 5D SYMMATRIX). Returns 0 on success, 1 on bad input.
static int nifti_tensor_2(nifti_image *nim, int lower2upper) {
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	if ((nim->datatype != DT_CALC) || (nvox3D < 1))
		return 1;
	int nVol = (int)(nim->nvox / nvox3D);
	if (nVol != 6) {
		fprintf(stderr, "nifti_tensor_2: input must have precisely 6 volumes (not %d)\n", nVol);
		return 1;
	}
	//3dAFNItoNIFTI does not set intent_code to NIFTI_INTENT_SYMMATRIX, so check dimensions
	//NOTE(review): both heuristics below test dim[4]==6; a true 5D SYMMATRIX
	// (lower) image would have dim[5]==6 and dim[4]==1, so the second warning
	// can fire on a valid upper-triangle input — confirm the intended test.
	if ((lower2upper) && (nim->dim[4] == 6))
		fprintf(stderr, "nifti_tensor_2: check images (header suggests already in upper triangle format)\n");
	if ((!lower2upper) && (nim->dim[4] == 6))
		fprintf(stderr, "nifti_tensor_2: check images (header suggests already in lower triangle format)\n");
	//lower xx xy yy xz yz zz
	//upper xx xy xz yy yz zz
	//swap volumes 3 and 4
	flt *in32 = (flt *)nim->data;
	flt *tmp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //scratch copy of one 3D volume
	flt *v3 = in32 + (2 * nvox3D);
	flt *v4 = in32 + (3 * nvox3D);
	memcpy(tmp, v4, nvox3D * sizeof(flt));
	memcpy(v4, v3, nvox3D * sizeof(flt));
	memcpy(v3, tmp, nvox3D * sizeof(flt));
	_mm_free(tmp);
	if (lower2upper) {
		//FSL uses non-standard upper triangle: plain 4D image with 6 timepoints
		nim->dim[0] = 4;
		for (int i = 4; i < 8; i++)
			nim->dim[i] = 1;
		nim->dim[4] = 6;
		nim->ndim = 4;
		nim->nt = 6;
		nim->nu = 1;
		nim->nv = 1;
		nim->nw = 1;
	} else { //upper2lower
		//lower is NIfTI default, used by AFNI, Camino, ANTS
		nim->intent_code = NIFTI_INTENT_SYMMATRIX;
		/*! To store an NxN symmetric matrix at each voxel:
		- dataset must have a 5th dimension
		- intent_code must be NIFTI_INTENT_SYMMATRIX
		- dim[5] must be N*(N+1)/2
		- intent_p1 must be N (in float format)
		- the matrix values A[i][[j] are stored in row-order:
		- A[0][0]
		- A[1][0] A[1][1]
		- A[2][0] A[2][1] A[2][2]
		- etc.: row-by-row*/
		nim->dim[0] = 5;
		for (int i = 4; i < 8; i++)
			nim->dim[i] = 1;
		nim->dim[5] = 6;
		nim->ndim = 5;
		nim->nt = 1;
		nim->nu = 6;
		nim->nv = 1;
		nim->nw = 1;
	}
	return 0;
}
//nifti_tensor_decomp: eigen-decompose a 6-volume diffusion tensor image
// (fslmaths "-tensor_decomp" emulation). Per voxel, EIG_tsfunc produces 14
// values: L1,L2,L3 (eigenvalues), V1,V2,V3 (3-vectors), FA, MD. The derived
// maps are written to disk with per-map filename suffixes, MO (mode) is
// computed from the eigenvalues, and FA is left behind in nim->data.
// isUpperTriangle: nonzero for FSL's xx xy xz yy yz zz volume ordering.
// Returns 0 on success, 1 on failure (or when not compiled in).
static int nifti_tensor_decomp(nifti_image *nim, int isUpperTriangle) {
	// MD= (Dxx+Dyy+Dzz)/3
	//https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
	// dtifit produces upper-triangular order: xx xy xz yy yz zz
	//MD = 1/3*(Dxx+Dyy+Dzz)
	//FA= sqrt(3/2)*sqrt(((Dx-MD)^2+(Dy-MD)^2+(Dz-MD^2))/(Dx^2+Dy^2+Dz^2))
	//fslmaths tensor.nii -tensor_decomp bork.nii
	// 3dDTeig -uddata -sep_dsets -prefix AFNIdwi.nii tensor.nii
	//3dDTeig expects LOWER diagonal order unless -uddata
	// Dxx,Dxy,Dyy,Dxz,Dyz,Dzz
	// https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDTeig.html
	//dxx, dxy, dyy, dxz, dyz, dzz
	// 3dDTeig -uddata -prefix AFNIdwi.nii tensor.nii
	// fslmaths tensor.nii -tensor_decomp bork.nii
	// Creates 5*3D and 3*4D files for a total of 14 volumes L1,L2,L3,V1(3),V2(3),V3(3),FA,MD
#ifdef tensor_decomp
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nVol = (int)(nim->nvox / nvox3D);
	if (nVol != 6) {
		fprintf(stderr, "nifti_tensor_decomp: input must have precisely 6 volumes (not %d)\n", nVol);
		return 1;
	}
	flt *in32 = (flt *)nim->data;
	//detect if data is upper or lower triangle
	// The "YY" component should be brighter (strongly positive) than the off axis XZ
#define detectUpperOrLower
#ifdef detectUpperOrLower
	double sumV3 = 0.0; //3rd volume, YY for lower, XZ for upper
	double sumV4 = 0.0; //4th volume, XZ for lower, YY for upper
	flt *v32 = in32 + (nvox3D * 2); //offset to 3rd volume
	for (size_t i = 0; i < nvox3D; i++)
		sumV3 += v32[i];
	v32 = in32 + (nvox3D * 3); //offset to 4th volume
	for (size_t i = 0; i < nvox3D; i++)
		sumV4 += v32[i];
	if ((sumV4 > sumV3) && (!isUpperTriangle))
		fprintf(stderr, "nifti_tensor_decomp: check results, input looks like UPPER triangle.\n");
	if ((sumV4 < sumV3) && (isUpperTriangle))
		fprintf(stderr, "nifti_tensor_decomp: check results, input looks like LOWER triangle.\n");
#endif
	flt *out32 = (flt *)_mm_malloc(14 * nvox3D * sizeof(flt), 64); //14 output maps, volume-major
	for (size_t i = 0; i < nvox3D; i++) {
		//n.b. in6 and out14 are ALWAYS float regradless of DT32, e.g. single even if DT=double
		//NOTE(review): in6/out14 are allocated and freed once per voxel — hoisting
		// them outside the loop would avoid nvox3D allocations; confirm
		// EIG_tsfunc does not retain the pointers before changing.
		float *in6 = (float *)_mm_malloc(6 * sizeof(float), 64);
		float *out14 = (float *)_mm_malloc(14 * sizeof(float), 64);
		size_t iv = i;
		for (int v = 0; v < 6; v++) { //gather the 6 tensor components for this voxel
			in6[v] = in32[iv];
			iv += nvox3D;
		}
		EIG_tsfunc(0.0, 0.0, 0, in6, 0.0, 0.0, NULL, 0, out14, isUpperTriangle);
		size_t ov = i;
		for (int v = 0; v < 14; v++) { //scatter the 14 outputs back volume-major
			out32[ov] = out14[v];
			ov += nvox3D;
		}
		_mm_free(out14);
		_mm_free(in6);
	}
	free(nim->data); //nim->data is re-pointed at slices of out32 below for saving
	// Creates 5*3D and 3*4D files for a total of 14 volumes L1(0),L2(1),L3(2),V1(3,4,5),V2(6,7,8),V3(9,10,11),FA(12),MD(13)
	flt *outv;
	//save 4D images
	nim->cal_min = -1;
	nim->cal_max = 1;
	nim->nvox = nvox3D * 3;
	nim->ndim = 4;
	nim->nt = 3;
	nim->nu = 1;
	nim->nv = 1;
	nim->nw = 1;
	nim->dim[0] = 4;
	nim->dim[4] = 3;
	for (int i = 5; i < 8; i++)
		nim->dim[i] = 1;
	//void * dat = (void *)calloc(1, 3*nvox3D * sizeof(flt)) ;
	//nim->data = dat;
	//flt * fa32 = (flt *) dat;
	//save V1
	outv = out32 + (nvox3D * 3);
	nim->data = (void *)outv;
	nifti_save(nim, "_V1");
	//save V2
	outv = out32 + (nvox3D * 6);
	//memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nifti_save(nim, "_V2");
	//save V3
	outv = out32 + (nvox3D * 9);
	//memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nifti_save(nim, "_V3");
	//release 4D memory
	//free(dat);
	//save 3D images
	nim->cal_min = 0;
	nim->cal_max = 0;
	nim->nvox = nvox3D * 1;
	nim->ndim = 3;
	nim->nt = 1;
	nim->dim[0] = 3;
	nim->dim[4] = 1;
	//save L1
	outv = out32;
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L1");
	//save L2
	outv = out32 + (nvox3D * 1);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L2");
	//save L3
	outv = out32 + (nvox3D * 2);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_L3");
	//save MD
	outv = out32 + (nvox3D * 13);
	//memcpy(fa32, outv, nvox3D*sizeof(flt));
	nim->data = (void *)outv;
	nim->cal_min = calmin(nim);
	nim->cal_max = calmax(nim);
	nifti_save(nim, "_MD");
	//single volume data: separately allocated so it survives _mm_free(out32)
	void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
	nim->data = dat;
	flt *fa32 = (flt *)dat;
	//save MO
	//MODE https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;4fbed3d1.1103
	// compute MO (MODE) from L1, L2, L3, MD
	//e1=l1-MD, e2=l2-MD, e3=l3-MD;
	//n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3);
	//d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3);
	//d = 2*d*d*d;
	//mode = n/d;
	//something is wrong with this formula.
	// a. Ennis 2006 includes a sqrt that can not be factored out
	// b. results differ from fslmaths
	nim->cal_min = -1;
	nim->cal_max = 1;
	flt *L1 = out32;
	flt *L2 = out32 + (nvox3D * 1);
	flt *L3 = out32 + (nvox3D * 2);
	flt *MD = out32 + (nvox3D * 13);
	for (size_t i = 0; i < nvox3D; i++) {
		flt e1 = L1[i] - MD[i];
		flt e2 = L2[i] - MD[i];
		flt e3 = L3[i] - MD[i];
		flt n = (e1 + e2 - 2 * e3) * (2 * e1 - e2 - e3) * (e1 - 2 * e2 + e3);
		flt d = (e1 * e1 + e2 * e2 + e3 * e3 - e1 * e2 - e2 * e3 - e1 * e3);
		d = sqrt(d); //Correlation r = 0.999746
		d = 2 * d * d * d;
		//d = sqrt(d); //Correlation r = 0.990319
		if (d != 0)
			d = n / d; //mode
		d = MIN(d, 1.0); //clamp mode to [-1, 1]
		d = MAX(d, -1.0);
		fa32[i] = d;
	}
	nifti_save(nim, "_MO");
	//save FA
	outv = out32 + (nvox3D * 12);
	memcpy(fa32, outv, nvox3D * sizeof(flt));
	nim->cal_min = 0;
	nim->cal_max = 1;
	nifti_save(nim, "_FA");
	//keep FA in memory
	nim->cal_max = 0;
	_mm_free(out32);
	return 0;
#else
	fprintf(stderr, "not compiled to support tensor_decomp\n");
	return 1;
#endif
} //nifti_tensor_decomp()
//kernel3D_dilall: repeatedly dilate (fill each zero voxel with the mean of
// its nonzero kernel neighbors) until no zero voxels remain ("-dilall").
// `kernel` holds nkernel entries of 4 ints: voxel offset, x displacement,
// y displacement, weight; the displacement columns reject offsets that wrap
// across row/column boundaries. `vol` selects which 3D volume to process.
//NOTE(review): if the volume contains no nonzero voxels at all, nZero can
// never reach 0 and this loop would never terminate — presumably callers
// guarantee at least one nonzero seed; confirm.
static void kernel3D_dilall(nifti_image *nim, int *kernel, int nkernel, int vol) {
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	flt *f32 = (flt *)nim->data;
	f32 += (nVox3D * vol); //point at the requested volume
	flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64); //snapshot read during each pass
	memcpy(inf32, f32, nVox3D * sizeof(flt));
	int nxy = nim->nx * nim->ny;
	size_t nZero = 1;
	while (nZero > 0) {
		//nZero counts voxels that were zero at the START of this pass (even if
		// filled during it), so the loop runs one extra confirming pass.
		nZero = 0;
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue;
					int nNot0 = 0;
					flt sum = 0.0f;
					for (size_t k = 0; k < nkernel; k++) {
						//i + kernel[k] may be negative; the signed sum converted to
						// size_t wraps to a huge value that the >= nVox3D test rejects
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						nNot0++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0)
						f32[i] = sum / nNot0;
					nZero++;
				} //for x
			} //for y
		} //for z
		memcpy(inf32, f32, nVox3D * sizeof(flt)); //refresh snapshot for the next pass
		//printf("n=0: %zu\n", nZero);
	} //nZero > 0
	_mm_free(inf32);
} //kernel3D_dilall()
//kernel3D: apply one kernel-based operation `op` to a single 3D volume
// (`vol`) of a 4D image. `kernel` stores nkernel entries of 4 ints: voxel
// offset, x displacement, y displacement, and INT_MAX-scaled weight. The
// x/y displacement columns detect offsets that wrap across row or column
// edges. The volume is snapshot into inf32 so reads are order-independent.
// Returns 0 on success, 1 for an unsupported operation.
static int kernel3D(nifti_image *nim, enum eOp op, int *kernel, int nkernel, int vol) {
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	flt *f32 = (flt *)nim->data;
	f32 += (nVox3D * vol); //point at the requested volume
	flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64); //read-only snapshot
	memcpy(inf32, f32, nVox3D * sizeof(flt));
	int nxy = nim->nx * nim->ny;
	if (op == fmediank) { //median filter
		flt *vxls = (flt *)_mm_malloc((nkernel) * sizeof(flt), 64); //in-bounds neighbor values
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					int nOK = 0;
					for (size_t k = 0; k < nkernel; k++) {
						//i + kernel[k] may be negative; converted to size_t it wraps
						// to a huge value that the >= nVox3D test rejects
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						vxls[nOK] = inf32[vx];
						nOK++;
					} //for k
					qsort(vxls, nOK, sizeof(flt), compare);
					int itm = (nOK * 0.5); //lower median (truncates toward zero)
					f32[i] = vxls[itm];
				} //for x
			} //for y
		} //for z
		_mm_free(vxls);
	} else if (op == dilMk) { //dilate: fill zero voxels with mean of nonzero neighbors
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue;
					int nNot0 = 0;
					flt sum = 0.0f;
					for (size_t k = 0; k < nkernel; k++) {
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						nNot0++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0)
						f32[i] = sum / nNot0;
				} //for x
			} //for y
		} //for z
	} else if (op == dilDk) { //maximum - fslmaths 6.0.1 emulation, note really MODE, max non-zero
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0)
						continue;
					//flt mx = -INFINITY;
					flt mx = NAN; //NaN sentinel: stays NaN only if no nonzero neighbor found
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						flt v = inf32[vx];
						if (v == 0.0)
							continue;
						mx = MAX(mx, inf32[vx]);
						//with dilD a input voxel of 0
					} //for k
					//https://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
					// f != f will be true only if f is NaN
					if (!(mx != mx))
						f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilFk) { //maximum - fslmaths 6.0.1 appears to use "dilF" when the user requests "dilD"
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt mx = f32[i]; //maximum filter seeds with the center voxel itself
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] <= mx))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						mx = MAX(mx, inf32[vx]);
						//if (mx < 0) continue; //with dilF, do not make a zero voxel darker than 0
					} //for k
					f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilallk) { // -dilall : Apply -dilM repeatedly until the entire FOV is covered");
		kernel3D_dilall(nim, kernel, nkernel, vol);
	} else if (op == eroFk) { //Minimum filtering of all voxels
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						f32[i] = MIN(f32[i], inf32[vx]);
					} //for k
				} //for x
			} //for y
		} //for z
	} else if (op == fmeank) { //weighted mean, weights renormalized to in-bounds neighbors
		flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64); //unpack INT_MAX-scaled weights
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						wt += kwt[k];
					} //for k
					f32[i] = sum / wt;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == fmeanuk) { //un-normalized weighted mean (weights sum as built)
		flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					//flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						//wt += kwt[k];
					} //for k
					//f32[i] = sum / wt;
					f32[i] = sum;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == erok) { //erode: zero any nonzero voxel touching a zero neighbor
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] == 0.0)
						continue;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0))
							continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						f32[i] = 0.0;
					} //for k
				} //for x
			} //for y
		} //for z
	} else {
		fprintf(stderr, "kernel3D: Unsupported operation\n");
		_mm_free(inf32);
		return 1;
	}
	_mm_free(inf32);
	return 0;
} //kernel3D
static int nifti_kernel(nifti_image *nim, enum eOp op, int *kernel, int nkernel) {
	//Apply kernel operation `op` independently to every 3D volume of the image.
	//Returns 0 on success, nonzero on unsupported input or a failing volume.
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int voxPerVol = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nVolumes = (int)(nim->nvox / voxPerVol);
	if (nVolumes < 1)
		return 1;
	if ((kernel == NULL) || (nkernel < 1))
		return 1;
	for (int vol = 0; vol < nVolumes; vol++) {
		int status = kernel3D(nim, op, kernel, nkernel, vol);
		if (status != 0)
			return status;
	}
	return 0;
}
static int nifti_roi(nifti_image *nim, int xmin, int xsize, int ymin, int ysize, int zmin, int zsize, int tmin, int tsize) {
	//Zero every voxel OUTSIDE the requested box, e.g. "fslmaths LAS -roi 3 32 0 40 0 40 0 5 f"
	//Returns 0 on success, 1 for unsupported input.
	int nt = nim->nvox / (nim->nx * nim->ny * nim->nz);
	if ((nim->nvox < 1) || (nt < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *img = (flt *)nim->data;
	//n.b. the sign of the determinant does not appear to influence "-roi"
	//inclusive upper bounds of the retained region
	int xmax = xmin + xsize - 1;
	int ymax = ymin + ysize - 1;
	int zmax = zmin + zsize - 1;
	int tmax = tmin + tsize - 1;
	size_t vx = 0;
	for (int t = 0; t < nt; t++) {
		int tIn = (t >= tmin) && (t <= tmax);
		for (int z = 0; z < nim->nz; z++) {
			int zIn = (z >= zmin) && (z <= zmax);
			for (int y = 0; y < nim->ny; y++) {
				int yIn = (y >= ymin) && (y <= ymax);
				for (int x = 0; x < nim->nx; x++) {
					int xIn = (x >= xmin) && (x <= xmax);
					if (!(xIn && yIn && zIn && tIn))
						img[vx] = 0.0;
					vx++;
				} //x
			} //y
		} //z
	} //t
	return 0;
}
//nifti_sobel: per-volume Sobel edge magnitude. Builds one 6-tap kernel per
// axis (offset/x-wrap/y-wrap/weight columns) and combines the three gradient
// magnitudes in a single pass. If isBinary is nonzero, a non-maximum
// suppression pass converts the magnitude map to a binary edge mask.
// Returns 0 on success, 1 for unsupported datatype.
static int nifti_sobel(nifti_image *nim, int offc, int isBinary) {
	//sobel is simply one kernel pass per dimension.
	// this could be achieved with successive passes of "-kernel"
	// here it is done in a single pass for cache efficiency
	// https://en.wikipedia.org/wiki/Sobel_operator
	int vox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if (nim->datatype != DT_CALC)
		return 1;
	int nvol = nim->nvox / vox3D;
	int numk = 6; //center voxel and all its neighbors
	int *kx = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int *ky = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int *kz = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int i = 0;
	for (int x = 0; x <= 1; x++)
		for (int y = -1; y <= 1; y++) {
			int sgn = (2 * x) - 1; //-1 or +1
			int weight = sgn * (2 - abs(y)); //Sobel weights: +/-2 center tap, +/-1 diagonals
			//kx compare left and right
			kx[i + numk] = (2 * x) - 1; //left/right wrap
			kx[i + numk + numk] = y; //anterior/posterior wrap
			kx[i] = kx[i + numk] + (kx[i + numk + numk] * (nim->nx)); //voxel offset
			kx[i + numk + numk + numk] = weight; //weight
			//ky compare anterior and posterior
			ky[i + numk] = y; //left/right wrap
			ky[i + numk + numk] = (2 * x) - 1; //anterior/posterior wrap
			ky[i] = ky[i + numk] + (ky[i + numk + numk] * (nim->nx)); //voxel offset
			ky[i + numk + numk + numk] = weight; //weight
			//kz superior/inferior
			kz[i + numk] = y; //left/right wrap
			kz[i + numk + numk] = 0; //anterior/posterior wrap
			kz[i] = y + (((2 * x) - 1) * nim->nx * nim->ny); //voxel offset
			kz[i + numk + numk + numk] = weight; //weight
			//printf("x%d y%d wt%d\n", kx[i+numk], kx[i+numk+numk], kx[i+numk+numk+numk]);
			//printf("x%d y%d wt%d\n", ky[i+numk], ky[i+numk+numk], ky[i+numk+numk+numk]);
			i++;
		} //for y
	flt *i32 = (flt *)nim->data; //input volumes
	//kx/ky/kz are read-only inside the parallel region; each volume is independent
#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt *iv32 = i32 + (v * vox3D);
		flt *imgin = _mm_malloc(vox3D * sizeof(flt), 64); //input values prior to blur
		//edge information:
		flt mx = 0.0; //running maximum magnitude (for isBinary normalization)
		uint8_t *imgdir = _mm_malloc(vox3D * sizeof(uint8_t), 64); //image direction
		if (isBinary)
			memset(imgdir, 0, vox3D * sizeof(uint8_t));
		memcpy(imgin, iv32, vox3D * sizeof(flt));
		int i = 0;
		for (int z = 0; z < nim->nz; z++)
			for (int y = 0; y < nim->ny; y++)
				for (size_t x = 0; x < nim->nx; x++) {
					//compute x gradient
					flt gx = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						//i + kx[k] may be negative; converted to size_t it wraps to a
						// huge value that the >= vox3D test rejects
						size_t vx = i + kx[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + kx[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kx[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gx += imgin[vx] * kx[k + numk + numk + numk];
					} //for k
					//compute y gradient
					flt gy = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + ky[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + ky[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + ky[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gy += imgin[vx] * ky[k + numk + numk + numk];
					} //for k
					//compute z gradient
					flt gz = 0.0f; //always 0 for 2D, we could add conditional to skip but optimize for 3D
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + kz[k];
						if ((vx < 0) || (vx >= vox3D))
							continue;
						//next handle edge cases
						int dx = x + kz[k + numk];
						if ((dx < 0) || (dx >= nim->nx))
							continue; //wrapped left-right
						int dy = y + kz[k + numk + numk];
						if ((dy < 0) || (dy >= nim->ny))
							continue; //wrapped anterior-posterior
						gz += imgin[vx] * kz[k + numk + numk + numk];
					} //for k
					gx = sqr(gx);
					gy = sqr(gy);
					gz = sqr(gz);
					iv32[i] = sqrt(gx + gy + gz); //gradient magnitude
					if (isBinary) {
						mx = MAX(mx, iv32[i]);
						if ((gx > gy) && (gx > gz))
							imgdir[i] = 1; //left/right gradient is strongest
						else if (gy > gz)
							imgdir[i] = 2; //anterior/posterior gradient is strongest
						else
							imgdir[i] = 3; //superior/inferior gradient is strongest (or tie)
					}
					i++;
				} //for x
		if (isBinary) {
			//magnitude in range 0..1, zero voxels below threshold
			float scale = 1.0;
			if (mx > 0.0)
				scale = 1.0 / mx;
			float thresh = 0.1;
			for (int vx = 0; vx < vox3D; vx++) {
				imgin[vx] = iv32[vx] * scale;
				if (imgin[vx] < thresh) {
					imgin[vx] = 0.0;
					continue;
				}
			}
			//zero output: we will not set border voxels
			memset(iv32, 0, vox3D * sizeof(flt));
			//non-maximum suppression: keep a voxel only if it beats both neighbors
			// along its dominant gradient axis AND has support on another axis
			int nx = nim->nx;
			int nxy = nx * nim->ny;
			for (int z = 1; z < (nim->nz -1); z++)
				for (int y = 1; y < (nim->ny - 1); y++)
					for (size_t x = 1; x < (nim->nx - 1); x++) {
						int vx = x + (y * nx) + (z * nxy);
						float val = imgin[vx];
						if (val == 0.0) continue;
						float mxX = MAX(imgin[vx-1],imgin[vx+1]);
						float mxY = MAX(imgin[vx-nx],imgin[vx+nx]);
						float mxZ = MAX(imgin[vx-nxy],imgin[vx+nxy]);
						if ((imgdir[vx] == 1) && (val > mxX) && ((mxY > 0.0) || (mxZ > 0.0)) ) //left/right gradient
							iv32[vx] = 1.0;
						else if ((imgdir[vx] == 2) && (val > mxY) && ((mxX > 0.0) || (mxZ > 0.0)) ) //anterior/posterior gradient
							iv32[vx] = 1.0;
						else if ((val > mxZ) && ((mxX > 0.0) || (mxY > 0.0)))//head/foot gradient
							iv32[vx] = 1.0;
					}
			nim->scl_inter = 0.0;
			nim->scl_slope = 1.0;
			nim->cal_min = 0.0;
			nim->cal_max = 1.0;
		} //if isBinary
		_mm_free(imgdir);
		_mm_free(imgin);
	} //for each volume
	_mm_free(kx);
	_mm_free(ky);
	_mm_free(kz);
	return 0;
} //nifti_sobel()
//nifti_subsamp2: halve the in-plane and through-plane resolution of each
// volume, cloning fslmaths "-subsamp2" (offc==0, Gaussian-ish 27-neighbor
// weighting) or "-subsamp2offc" (offc!=0, plain 2x2x2 box averaging). The
// header (dims, pixdims, sform/qform, origin) is updated to match.
// Returns 0 on success (or no-op when already minimal), 1 on bad input.
static int nifti_subsamp2(nifti_image *nim, int offc) {
	//naive downsampling: this is provided purely to mimic the behavior of fslmaths
	// see https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/s0nw827nc4kcnaa/Aliasing.ipynb
	// no anti-aliasing filter https://en.wikipedia.org/wiki/Image_scaling
	int invox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int indim[5];
	for (int i = 1; i < 5; i++)
		indim[i] = MAX(nim->dim[i], 1);
	int nvol = nim->nvox / invox3D;
	int x_odd = indim[1] % 2; //odd input width changes which columns pair up
	if ((nim->nvox < 1) || (nvol < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nx = ceil(nim->nx * 0.5);
	int ny = ceil(nim->ny * 0.5);
	int nz = ceil(nim->nz * 0.5);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
		return 0;
	int nvox3D = nx * ny * nz;
	flt *i32 = (flt *)nim->data;
	void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
	flt *o32 = (flt *)dat;
	int x_flip = 0; //radiological vs neurological storage influences column pairing
	if (!neg_determ(nim))
		x_flip = 1;
	if (offc) {
		//subsamp2offc: each output voxel is the unweighted mean of the input
		// voxels that map onto it (scatter + count, then divide)
		int *wt = _mm_malloc(nvox3D * nvol * sizeof(int), 64); //weight, just for edges
		for (int i = 0; i < (nvox3D * nvol); i++) {
			wt[i] = 0;
			o32[i] = 0.0;
		}
		int boost = 0;
		if ((x_odd) && (x_flip))
			boost = 1;
		size_t i = 0;
		for (int v = 0; v < indim[4]; v++) {
			size_t vo = v * nvox3D; //volumes do not get reduced
			for (int z = 0; z < indim[3]; z++) {
				size_t zo = vo + ((z / 2) * ny * nx);
				for (int y = 0; y < indim[2]; y++) {
					size_t yo = zo + ((y / 2) * nx);
					for (int x = 0; x < indim[1]; x++) {
						size_t xo = yo + ((x + boost) / 2);
						wt[xo]++;
						o32[xo] += i32[i];
						i++;
					} //x
				} //y
			} //z
		} //vol
		for (int i = 0; i < (nvox3D * nvol); i++)
			if (wt[i] > 0)
				o32[i] /= wt[i];
		_mm_free(wt);
	} else { //if subsamp2offc else subsamp2
		//subsamp2: 27-neighbor weighted gather centered on every other input voxel
		int numk = 27; //center voxel and all its neighbors
		int *kernel = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
		int i = 0;
		for (int z = -1; z <= 1; z++)
			for (int y = -1; y <= 1; y++)
				for (int x = -1; x <= 1; x++) {
					kernel[i] = x + (y * indim[1]) + (z * indim[1] * indim[2]);
					kernel[i + numk] = x; //left-right wrap detection
					kernel[i + numk + numk] = y;//anterior-posterior wrap detection
					//weight halves per step of squared distance: 8,4,2,1 (exact ints)
					kernel[i + numk + numk + numk] = 8 / (pow(2, sqr(x) + sqr(y) + sqr(z))); //kernel weight
					i++;
				}
		int boost = 0;
		//if ((xflip == 1) && (odd == 0)) boost = 1;
		if ((x_flip == 1) && (x_odd == 0))
			boost = 1;
		//printf("boost %d\n", boost);
		size_t nvox3Din = indim[1] * indim[2] * indim[3];
		size_t o = 0;
		for (int v = 0; v < nvol; v++) {
			size_t vi = v * nvox3Din;
			for (int z = 0; z < nz; z++) {
				int zi = (2 * z * indim[1] * indim[2]);
				//printf("%zu \n", zi);
				for (int y = 0; y < ny; y++) {
					int yy = y + y; //y*2 input y
					int yi = zi + (yy * indim[1]);
					for (int x = 0; x < nx; x++) {
						//int xx = x+x+xflip; //x*2 input x
						int xx = x + x + boost; //x*2 input x
						int xi = yi + xx;
						//flt sum = 0.0;
						//flt wt = 0.0;
						double sum = 0.0;
						double wt = 0.0;
						for (int k = 0; k < numk; k++) {
							if ((xi + kernel[k]) < 0)
								continue; //position would be less than 0 - outside volume, avoid negative values in size_t
							size_t pos = xi + kernel[k]; //offset
							if (pos >= nvox3Din)
								continue; //position outside volume, e.g. slice above top of volume
							int xin = xx + kernel[k + numk];
							if ((xin < 0) || (xin >= indim[1]))
								continue; //wrap left or right
							int yin = yy + kernel[k + numk + numk];
							if ((yin < 0) || (yin >= indim[2]))
								continue; //wrap anterior or posterior
							flt w = kernel[k + numk + numk + numk];
							wt += w;
							sum += i32[vi + pos] * w;
						}
						//if (wt > 0.0) //no need to check: every voxel has at least one contributor (itself)
						o32[o] = sum / wt;
						//else {
						// o32[o] = 666.6;
						o++;
					} //x
				} //y
			} //z
		} //vol
		_mm_free(kernel);
	} //if subsamp2offc else subsamp2
	//update header: new dims, doubled voxel sizes
	nim->nvox = nvox3D * nvol;
	nim->nx = nx;
	nim->ny = ny;
	nim->nz = nz;
	nim->dim[1] = nx;
	nim->dim[2] = ny;
	nim->dim[3] = nz;
	nim->dx *= 2;
	nim->dy *= 2;
	nim->dz *= 2;
	nim->pixdim[1] *= 2;
	nim->pixdim[2] *= 2;
	nim->pixdim[3] *= 2;
	//adjust origin
	mat44 m = xform(nim);
	vec4 vx = setVec4(0, 0, 0);
	vec4 pos = nifti_vect44mat44_mul(vx, m);
	//vx = setVec4(0.5,0.5,0.5);
	//vx = setVec4(1.0,0.0,0.0);
	//the half-voxel origin shift depends on downsampling mode, width parity
	// and storage direction (matches fslmaths output empirically)
	if (offc) {
		//printf("%d flip odd %d\n", x_flip, x_odd);
		if ((x_odd) && (x_flip))
			vx = setVec4(-0.5, -0.5, -0.5); //subsamp2offc
		else
			vx = setVec4(0.5, 0.5, 0.5); //subsamp2offc
		//if (!xflip) {
		// vx = setVec4(0.5,0.5,0.5);
		// printf("y\n");
		//}
	} else {
		if (x_odd)
			vx = setVec4(0, 0, 0); //subsamp2
		else
			vx = setVec4(1, 0, 0); //subsamp2
		if (!x_flip)
			vx = setVec4(0, 0, 0);
	}
	vec4 pos1 = nifti_vect44mat44_mul(vx, m);
	vx = setVec4(pos1.v[0] - pos.v[0], pos1.v[1] - pos.v[1], pos1.v[2] - pos.v[2]);
	m.m[0][3] += vx.v[0];
	m.m[1][3] += vx.v[1];
	m.m[2][3] += vx.v[2];
	//scale spatial transform
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			m.m[i][j] *= 2;
	//apply to both sform and qform in case VTK user
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++) {
			nim->sto_xyz.m[i][j] = m.m[i][j];
			nim->qto_xyz.m[i][j] = m.m[i][j];
		}
	free(nim->data);
	nim->data = dat;
	return 0;
}
//nifti_resize: rescale each volume by zoom factors zx/zy/zz using three
// separable filter passes (X, then Y, then Z), with per-axis contributor
// lists built by createFilter(interp_method). Header dims, pixdims and the
// sform/qform rotation columns are rescaled to match.
// Returns 0 on success (or no-op when size is unchanged), 1 on bad input.
static int nifti_resize(nifti_image *nim, flt zx, flt zy, flt zz, int interp_method) {
	//see AFNI's 3dresample
	//better than fslmaths: fslmaths can not resample 4D data
	// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni.nii -input rest.nii
	// time ./sm rest.nii -subsamp2 out.nii
	//However, aliasing artifacts
	// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni2.nii -input zoneplate3d_129.nii
	int invox3D = nim->nx * nim->ny * nim->nz;
	int nvol = nim->nvox / invox3D;
	if ((nim->nvox < 1) || (nvol < 1))
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nx = ceil(nim->nx * zx);
	int ny = ceil(nim->ny * zy);
	int nz = ceil(nim->nz * zz);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
		return 0;
	int nvox3D = nx * ny * nz;
	flt *i32 = (flt *)nim->data;
	void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
	flt *o32 = (flt *)dat;
	//each volume is independent: resample in parallel
#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt *iv32 = i32 + (v * invox3D);
		//reduce in X: half the width: 1/2 input file size
		flt *imgx = _mm_malloc(nx * nim->ny * nim->nz * sizeof(flt), 64); //input values prior to blur
		if (nx == nim->nx) //no change in x dimension
			memcpy(imgx, iv32, nx * nim->ny * nim->nz * sizeof(flt));
		else {
			CLIST *contrib = createFilter(nim->nx, nx, interp_method); //per-output-column contributor list
			size_t i = 0;
			for (size_t y = 0; y < (nim->ny * nim->nz); y++) { //every row of every slice
				for (int x = 0; x < nx; x++) {
					flt weight = 0.0;
					for (int j = 0; j < contrib[x].n; j++)
						weight += iv32[contrib[x].p[j].pixel] * contrib[x].p[j].weight;
					imgx[i++] = weight;
				}
				iv32 += nim->nx; //advance to next input row
			} //for y
			for (i = 0; i < nx; i++)
				free(contrib[i].p);
			free(contrib);
		}
		//reduce in Y: half the height: 1/4 input size
		flt *imgy = _mm_malloc(nx * ny * nim->nz * sizeof(flt), 64); //input values prior to blur
		if (ny == nim->ny) //no change in y dimension
			memcpy(imgy, imgx, nx * ny * nim->nz * sizeof(flt));
		else {
			CLIST *contrib = createFilter(nim->ny, ny, interp_method);
			flt *iny = _mm_malloc(nim->ny * sizeof(flt), 64); //input values prior to resize
			for (int z = 0; z < nim->nz; z++) {
				for (int x = 0; x < nx; x++) {
					int yo = (z * nx * ny) + x; //output
					int yi = (z * nx * nim->ny) + x; //input
					for (int j = 0; j < nim->ny; j++) { //gather one input column into contiguous iny
						//iny[j] = imgx[yi+(j*nx)];
						iny[j] = imgx[yi];
						yi += nx;
					}
					for (int y = 0; y < ny; y++) {
						flt weight = 0.0;
						for (int j = 0; j < contrib[y].n; j++)
							weight += iny[contrib[y].p[j].pixel] * contrib[y].p[j].weight;
						//weight = y;
						imgy[yo] = weight;
						yo += nx;
					} //y
				} //x
			} //z
			_mm_free(iny);
			for (int i = 0; i < ny; i++)
				free(contrib[i].p);
			free(contrib);
		}
		_mm_free(imgx);
		//reduce in Z
		flt *ov32 = o32 + (v * nvox3D);
		if (nz == nim->nz) //no change in z dimension
			memcpy(ov32, imgy, nx * ny * nz * sizeof(flt));
		else {
			CLIST *contrib = createFilter(nim->nz, nz, interp_method);
			flt *inz = _mm_malloc(nim->nz * sizeof(flt), 64); //input values prior to resize
			int nxy = nx * ny;
			for (int y = 0; y < ny; y++) {
				for (int x = 0; x < nx; x++) {
					int zo = x + (y * nx); //output offset
					int zi = x + (y * nx); //input offset
					for (int j = 0; j < nim->nz; j++) { //gather one through-plane column
						inz[j] = imgy[zi];
						zi += nxy;
					}
					for (int z = 0; z < nz; z++) {
						//for (int j = 0; j < nim->nz; j++)
						// inz[j] = imgy[zi+(j*nx*ny)];
						flt weight = 0.0;
						for (int j = 0; j < contrib[z].n; j++)
							weight += inz[contrib[z].p[j].pixel] * contrib[z].p[j].weight;
						//weight = y;
						ov32[zo] = weight;
						zo += nx * ny;
					} //for z
				} //for x
			} //for y
			_mm_free(inz);
			for (int i = 0; i < nz; i++)
				free(contrib[i].p);
			free(contrib);
		}
		_mm_free(imgy);
	} //for v
	//update header: new dims, scaled voxel sizes
	nim->nvox = nvox3D * nvol;
	nim->nx = nx;
	nim->ny = ny;
	nim->nz = nz;
	nim->dim[1] = nx;
	nim->dim[2] = ny;
	nim->dim[3] = nz;
	nim->dx /= zx;
	nim->dy /= zy;
	nim->dz /= zz;
	nim->pixdim[1] /= zx;
	nim->pixdim[2] /= zy;
	nim->pixdim[3] /= zz;
	//adjust origin - again, just like fslmaths
	mat44 m = xform(nim);
	m.m[0][0] /= zx;
	m.m[1][0] /= zx;
	m.m[2][0] /= zx;
	m.m[0][1] /= zy;
	m.m[1][1] /= zy;
	m.m[2][1] /= zy;
	m.m[0][2] /= zz;
	m.m[1][2] /= zz;
	m.m[2][2] /= zz;
	for (int i = 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user)
		for (int j = 0; j < 4; j++) {
			nim->sto_xyz.m[i][j] = m.m[i][j];
			nim->qto_xyz.m[i][j] = m.m[i][j];
		}
	free(nim->data);
	nim->data = dat;
	return 0;
}
//Relative-tolerance float equality: true when the difference is within
// epsilon scaled by the smaller magnitude. Two NaNs count as equal
// (C's == operator reports nan != nan).
static int essentiallyEqual(float a, float b) {
	if (isnan(a) && isnan(b))
		return 1;
	float magA = fabs(a);
	float magB = fabs(b);
	float smaller = (magA > magB) ? magB : magA;
	return (fabs(a - b) <= (smaller * epsilon)) ? 1 : 0;
}
//Compare image nim voxel-wise against the image in file fin. Prints
// correlation, per-image descriptives, and the most-different voxel to stderr,
// then terminates: exit(0) if essentially equal, exit(1) if different,
// exit(2) on read failure. For 3-volume inputs (V1 vectors), additionally
// exits 0 when all vector pairs are parallel (polarity ignored).
static void nifti_compare(nifti_image *nim, char *fin) {
	if (nim->nvox < 1)
		exit(1);
	if (nim->datatype != DT_CALC) {
		fprintf(stderr, "nifti_compare: Unsupported datatype %d\n", nim->datatype);
		exit(1);
	}
	nifti_image *nim2 = nifti_image_read2(fin, 1);
	if (!nim2) {
		fprintf(stderr, "** failed to read NIfTI image from '%s'\n", fin);
		exit(2);
	}
	if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) {
		//fixed: message was missing the space before "vs" (matches nifti_binary)
		fprintf(stderr, "** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz);
		nifti_image_free(nim2);
		exit(1);
	}
	if (nim->nvox != nim2->nvox) {
		fprintf(stderr, " Number of volumes differ\n");
		nifti_image_free(nim2);
		exit(1);
	}
	if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm
		fprintf(stderr, "WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2));
		fprintf(stderr, "          Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n");
	}
	in_hdr ihdr = set_input_hdr(nim2);
	if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) {
		nifti_image_free(nim2);
		exit(1);
	}
	flt *img = (flt *)nim->data;
	flt *img2 = (flt *)nim2->data;
	size_t differentVox = nim->nvox; //sentinel: nvox means "no difference found"
	double sum = 0.0;
	double sum2 = 0.0;
	double maxDiff = 0.0;
	size_t nNotNan = 0;
	size_t nDifferent = 0;
	for (size_t i = 0; i < nim->nvox; i++) {
		if (!essentiallyEqual(img[i], img2[i])) {
			if (fabs(img[i] - img2[i]) > maxDiff) {
				differentVox = i;
				maxDiff = fabs(img[i] - img2[i]);
			}
			nDifferent++;
		}
		//fixed: second operand was isnan(img[i]) again, so a NaN that appeared
		// only in image 2 polluted the sums; descriptives are documented as
		// "voxels that are numeric in both images"
		if (isnan(img[i]) || isnan(img2[i]))
			continue;
		nNotNan++;
		sum += img[i];
		sum2 += img2[i];
	}
	if (differentVox >= nim->nvox) {
		//fprintf(stderr,"Images essentially equal\n");
		nifti_image_free(nim2);
		exit(0);
	}
	//second pass - one pass correlation is inaccurate or slow
	nNotNan = MAX(1, nNotNan);
	flt mn = INFINITY; //do not set to item 1, in case it is nan
	flt mx = -INFINITY;
	flt sd = 0.0;
	flt ave = sum / nNotNan;
	flt mn2 = INFINITY;
	flt mx2 = -INFINITY;
	flt sd2 = 0.0;
	flt ave2 = sum2 / nNotNan;
	//for i := 0 to (n - 1) do
	//  sd := sd + sqr(y[i] - mn);
	//sd := sqrt(sd / (n - 1));
	double sumDx = 0.0;
	for (size_t i = 0; i < nim->nvox; i++) {
		//fixed: was isnan(img[i]) twice, see first pass
		if (isnan(img[i]) || isnan(img2[i]))
			continue;
		mn = MIN(mn, img[i]);
		mx = MAX(mx, img[i]);
		sd += sqr(img[i] - ave);
		mn2 = MIN(mn2, img2[i]);
		mx2 = MAX(mx2, img2[i]);
		sd2 += sqr(img2[i] - ave2);
		sumDx += (img[i] - ave) * (img2[i] - ave2);
	}
	double r = 0.0;
	nNotNan = MAX(2, nNotNan);
	if (nim->nvox < 2) {
		sd = 0.0;
		sd2 = 0.0;
	} else {
		sd = sqrt(sd / (nNotNan - 1));
		//if (sd != 0.0) sd = 1.0/sd;
		sd2 = sqrt(sd2 / (nNotNan - 1));
		//if (sd2 != 0.0) sd2 = 1.0/sd2;
		if ((sd * sd2) != 0.0)
			r = sumDx / (sd * sd2 * (nNotNan - 1));
		//r = r / (nim->nvox - 1);
	}
	r = MIN(r, 1.0); //clamp for rounding error
	r = MAX(r, -1.0);
	fprintf(stderr, "Images Differ: Correlation r = %g, identical voxels %d%%\n", r, (int)floor(100.0 * (1.0 - (double)nDifferent / (double)nim->nvox)));
	if (nNotNan < nim->nvox) {
		fprintf(stderr, " %" PRId64 " voxels have a NaN in at least one image.\n", nim->nvox - nNotNan);
		fprintf(stderr, " Descriptives consider voxels that are numeric in both images.\n");
	}
	fprintf(stderr, " Most different voxel %g vs %g (difference %g)\n", img[differentVox], img2[differentVox], maxDiff);
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	size_t vx[4]; //x,y,z,volume coordinates of the most different voxel
	vx[3] = differentVox / nvox3D;
	vx[2] = (differentVox / (nim->nx * nim->ny)) % nim->nz;
	vx[1] = (differentVox / nim->nx) % nim->ny;
	vx[0] = differentVox % nim->nx;
	fprintf(stderr, " Most different voxel location %zux%zux%zu volume %zu\n", vx[0], vx[1], vx[2], vx[3]);
	fprintf(stderr, "Image 1 Descriptives\n");
	fprintf(stderr, " Range: %g..%g Mean %g StDev %g\n", mn, mx, ave, sd);
	fprintf(stderr, "Image 2 Descriptives\n");
	fprintf(stderr, " Range: %g..%g Mean %g StDev %g\n", mn2, mx2, ave2, sd2);
	//V1 comparison - EXIT_SUCCESS if all vectors are parallel (for DWI up vector [1 0 0] has same direction as down [-1 0 0])
	if (nVol != 3) {
		nifti_image_free(nim2);
		exit(1);
	}
	int allParallel = 1;
	//niimath ft_V1 -compare nt_V1
	for (size_t i = 0; i < (size_t)nvox3D; i++) {
		//check angle of two vectors... assume unit vectors
		flt v[3]; //vector, image 1
		v[0] = img[i];
		v[1] = img[i + nvox3D];
		v[2] = img[i + nvox3D + nvox3D];
		flt v2[3]; //vector, image 2
		v2[0] = img2[i];
		v2[1] = img2[i + nvox3D];
		v2[2] = img2[i + nvox3D + nvox3D];
		flt x[3]; //cross product: zero length when vectors are parallel
		x[0] = (v[1] * v2[2]) - (v[2] * v2[1]);
		x[1] = (v[2] * v2[0]) - (v[0] * v2[2]);
		x[2] = (v[0] * v2[1]) - (v[1] * v2[0]);
		flt len = sqrt((x[0] * x[0]) + (x[1] * x[1]) + (x[2] * x[2]));
		if (len > 0.01) {
			allParallel = 0;
			//fprintf(stderr,"[%g %g %g] vs [%g %g %g]\n", v[0],v[1], v[2], v2[0], v2[1], v2[2]);
			break;
		}
	}
	if (allParallel) {
		fprintf(stderr, "Despite polarity differences, all vectors are parallel.\n");
		nifti_image_free(nim2);
		exit(0);
	}
	nifti_image_free(nim2);
	exit(1);
} //nifti_compare()
//Raise every voxel to the power v, in place.
//clone operations from ANTS ImageMath: power
//https://manpages.debian.org/jessie/ants/ImageMath.1.en.html
//Returns 0 on success, 1 for an empty image or unsupported datatype.
static int nifti_binary_power(nifti_image *nim, double v) {
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	//fixed: removed unused local 'flt fv = v;'
	flt *f32 = (flt *)nim->data;
	for (size_t i = 0; i < nim->nvox; i++)
		f32[i] = pow(f32[i], v); //pow computed in double, stored as flt
	return 0;
}
//Voxel-wise binary operation: combine image nim (modified in place) with the
// image read from file fin. op selects add/sub/mul/min/max/divX/mod/rem,
// thresholding (thr/uthr) or masking (mas). A 3D input may be broadcast
// across a 4D input; if nim is 3D and fin is 4D, the output becomes 4D
// (swap4D below). Returns 0 on success, 1 on mismatch/unsupported op,
// 2 when fin cannot be read. Several branches deliberately replicate
// fslmaths quirks (see inline comments).
static int nifti_binary(nifti_image *nim, char *fin, enum eOp op) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC) {
fprintf(stderr, "nifti_binary: Unsupported datatype %d\n", nim->datatype);
return 1;
}
nifti_image *nim2 = nifti_image_read2(fin, 1);
if (!nim2) {
fprintf(stderr, "** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) {
fprintf(stderr, "** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz);
nifti_image_free(nim2);
return 1;
}
if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm
fprintf(stderr, "WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2));
fprintf(stderr, "          Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n");
}
//convert the second image to the working datatype before combining
in_hdr ihdr = set_input_hdr(nim2);
if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) {
nifti_image_free(nim2);
return 1;
}
flt *imga = (flt *)nim->data;
flt *imgb = (flt *)nim2->data;
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvola = nim->nvox / nvox3D; //volumes in image A
int nvolb = nim2->nvox / nvox3D; //volumes in image B
int rem0 = 0; //set when a remainder op hit a zero divisor
int swap4D = 0; //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
if ((nvolb > 1) && (nim->nvox != nim2->nvox) && ((op == uthr) || (op == thr))) {
//"niimath 3D -uthr 4D out" only uses 1st volume of 4D, only one volume out
nvolb = 1; //fslmaths
printf("threshold operation expects 3D mask\n"); //fslmaths makes no modification to image
if (op == uthr) //strictly for fslmaths compatibility - makes no sense
for (size_t i = 0; i < nim->nvox; i++)
imga[i] = 0;
nifti_image_free(nim2);
return 0;
} else if (nim->nvox != nim2->nvox) {
//situation where one input is 3D and the other is 4D
if ((nvola != 1) && ((nvolb != 1))) {
fprintf(stderr, "nifti_binary: both images must have the same number of volumes, or one must have a single volume (%d and %d)\n", nvola, nvolb);
nifti_image_free(nim2);
return 1;
}
if (nvola == 1) {
//swap the roles so imga is always the multi-volume operand
imgb = (flt *)nim->data;
imga = (flt *)nim2->data;
swap4D = 1;
nvolb = nim->nvox / nvox3D;
nvola = nim2->nvox / nvox3D;
}
} //make it so imga/novla >= imgb/nvolb
for (int v = 0; v < nvola; v++) { //
int va = v * nvox3D; //start of volume for image A
int vb = (v % nvolb) * nvox3D; //start of volume for image B (broadcast when nvolb == 1)
if (op == add) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] += imgb[vb + i];
} else if (op == sub) {
//subtraction is not commutative: swap4D branches restore A - B order
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
imga[va + i] = imgb[vb + i] - imga[va + i];
//printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]);
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]);
imga[va + i] = imga[va + i] - imgb[vb + i];
}
}
} else if (op == mul) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] *= imgb[vb + i];
} else if (op == max) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] = MAX(imga[va + i], imgb[vb + i]);
} else if (op == min) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] = MIN(imga[va + i], imgb[vb + i]);
} else if (op == thr) {
//thr : use following number to threshold current image (zero anything below the number)
for (int i = 0; i < nvox3D; i++)
if (imga[va + i] < imgb[vb + i])
imga[va + i] = 0;
} else if (op == uthr) {
//uthr : use following number to upper-threshold current image (zero anything above the number)
for (int i = 0; i < nvox3D; i++)
if (imga[va + i] > imgb[vb + i])
imga[va + i] = 0;
} else if (op == mas) {
//mas : zero voxels where the mask is <= 0
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
if (imga[va + i] > 0)
imga[va + i] = imgb[vb + i];
else
imga[va + i] = 0;
}
} else {
for (int i = 0; i < nvox3D; i++)
if (imgb[vb + i] <= 0)
imga[va + i] = 0;
}
} else if (op == divX) {
//division by zero yields 0 (fslmaths convention)
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//flt x = imga[va+i];
if (imga[va + i] != 0.0f)
imga[va + i] = imgb[vb + i] / imga[va + i];
//printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]);
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]);
if (imgb[vb + i] == 0.0f)
imga[va + i] = 0.0f;
else
imga[va + i] = imga[va + i] / imgb[vb + i];
}
}
} else if (op == mod) { //afni mod function, divide by zero yields 0 (unlike Matlab, see remtest.m)
//fractional remainder:
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) );
if (imga[va + i] != 0.0f)
imga[va + i] = fmod(imgb[vb + i], imga[va + i]);
else {
rem0 = 1;
imga[va + i] = 0; //imgb[vb+i];
}
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) );
if (imgb[vb + i] != 0.0f)
//imga[va+i] = round(fmod(imga[va+i], imgb[vb+i]));
imga[va + i] = fmod(imga[va + i], imgb[vb + i]);
else {
rem0 = 1;
imga[va + i] = 0;
}
}
}
} else if (op == rem) { //fmod _rem
//integer remainder: operands truncated toward zero before fmod
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) );
if (trunc(imga[va + i]) != 0.0f)
imga[va + i] = fmod(trunc(imgb[vb + i]), trunc(imga[va + i]));
else {
rem0 = 1;
imga[va + i] = imgb[vb + i];
}
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) );
if (trunc(imgb[vb + i]) != 0.0f)
//imga[va+i] = round(fmod(imga[va+i], imgb[vb+i]));
imga[va + i] = fmod(trunc(imga[va + i]), trunc(imgb[vb + i]));
else
rem0 = 1;
}
}
} else {
fprintf(stderr, "nifti_binary: unsupported operation %d\n", op);
nifti_image_free(nim2);
return 1;
}
}
if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
//copy the 4D geometry of nim2 into nim and steal nim2's (result) buffer
nim->nvox = nim2->nvox;
nim->ndim = nim2->ndim;
nim->nt = nim2->nt;
nim->nu = nim2->nu;
nim->nv = nim2->nv;
nim->nw = nim2->nw;
for (int i = 4; i < 8; i++) {
nim->dim[i] = nim2->dim[i];
nim->pixdim[i] = nim2->pixdim[i];
}
nim->dt = nim2->dt;
nim->du = nim2->du;
nim->dv = nim2->dv;
nim->dw = nim2->dw;
free(nim->data);
nim->data = nim2->data;
nim2->data = NULL; //ownership transferred: prevent double free below
}
nifti_image_free(nim2);
if (rem0) {
fprintf(stderr, "Warning -rem image included zeros (fslmaths exception)\n");
return 0;
}
return 0;
} // nifti_binary()
//Pairs a voxel value with its original location so qsort() can order
// values while remembering where each came from (used by compare()).
struct sortIdx {
flt val; //voxel intensity (sort key)
int idx; //original linear index (or 0/1 truth flag in nifti_roc)
};
//ROC analysis (fslmaths-style): sweep a threshold down from the maximum of
// statistic image nim, writing "FP-rate TP-rate threshold" rows to foutfile
// until the false-positive rate exceeds fpThresh (0..1 exclusive).
// ftruth: ground truth image; voxels < 0 are excluded from all calculations.
// fnoise: optional 4D noise image used to estimate familywise error - the
// maximum of each noise volume (within the truth mask/border) defines FPs.
//fslmaths appears to ignore voxels on edge of image, and will crash with
// small images (error: sort(): given object has non-finite elements),
// therefore there is a margin ("border") around the volume.
//Returns 0 on success, non-zero/exit on error.
static int nifti_roc(nifti_image *nim, double fpThresh, const char *foutfile, const char *fnoise, const char *ftruth) {
	if (nim->datatype != DT_CALC)
		return 1;
	int border = 5; //in voxels
	int mindim = border + border + 1; //e.g. minimum size has one voxel surrounded by border on each side
	if ((nim->nx < mindim) || (nim->ny < mindim) || (nim->nz < mindim)) {
		fprintf(stderr, "volume too small for ROC analyses\n");
		return 1;
	}
	if (nim->nvox > (nim->nx * nim->ny * nim->nz)) {
		fprintf(stderr, "ROC input should be 3D image (not 4D)\n"); //fslmaths seg faults
		return 1;
	}
	if ((fpThresh <= 0.0) || (fpThresh >= 1.0)) {
		fprintf(stderr, "ROC false-positive threshold should be between 0 and 1, not '%g'\n", fpThresh);
		return 1;
	}
	nifti_image *nimTrue = nifti_image_read2(ftruth, 1);
	if (!nimTrue) {
		fprintf(stderr, "** failed to read NIfTI image from '%s'\n", ftruth);
		exit(2);
	}
	if ((nim->nx != nimTrue->nx) || (nim->ny != nimTrue->ny) || (nim->nz != nimTrue->nz)) {
		fprintf(stderr, "** Truth image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimTrue->nx, nimTrue->ny, nimTrue->nz);
		nifti_image_free(nimTrue);
		exit(1);
	}
	if (nimTrue->nvox > (nimTrue->nx * nimTrue->ny * nimTrue->nz)) {
		fprintf(stderr, "ROC truth should be 3D image (not 4D)\n"); //fslmaths seg faults
		return 1;
	}
	nifti_image *nimNoise = NULL;
	//count number of tests
	//If the truth image contains negative voxels these get excluded from all calculations
	int nTest = 0;
	int nTrue = 0;
	size_t i = 0;
	flt *imgTrue = (flt *)nimTrue->data;
	flt *imgObs = (flt *)nim->data;
	for (int z = 0; z < nim->nz; z++)
		for (int y = 0; y < nim->ny; y++)
			for (int x = 0; x < nim->nx; x++) {
				if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) {
					nTest++;
					if (imgTrue[i] > 0)
						nTrue++;
				}
				i++;
			}
	if (nTest < 1) {
		fprintf(stderr, "** All truth voxels inside border are negative\n");
		exit(1);
	}
	if (nTest == nTrue)
		fprintf(stderr, "Warning: All truth voxels inside border are the same (all true or all false)\n");
	struct sortIdx *k = (struct sortIdx *)_mm_malloc(nTest * sizeof(struct sortIdx), 64);
	//load the data: val = observed statistic, idx = 1 if truly signal
	nTest = 0;
	i = 0;
	for (int z = 0; z < nim->nz; z++)
		for (int y = 0; y < nim->ny; y++)
			for (int x = 0; x < nim->nx; x++) {
				if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) {
					k[nTest].val = imgObs[i];
					k[nTest].idx = imgTrue[i] > 0;
					nTest++;
				}
				i++;
			}
	qsort(k, nTest, sizeof(struct sortIdx), compare);
	FILE *txt = fopen(foutfile, "w+");
	if (!txt) { //fixed: fopen result was previously used unchecked
		fprintf(stderr, "** failed to create output file '%s'\n", foutfile);
		_mm_free(k);
		nifti_image_free(nimTrue);
		return 1;
	}
	flt threshold = k[nTest - 1].val; //maximum observed intensity
	int bins = 1000; //step size: how often are results reported
	flt step = (threshold - k[0].val) / bins; //[max-min]/bins
	int fp = 0;
	int tp = 0;
	if (fnoise != NULL) {
		nimNoise = nifti_image_read2(fnoise, 1);
		if (!nimNoise) { //fixed: previously dereferenced without a NULL check
			fprintf(stderr, "** failed to read NIfTI image from '%s'\n", fnoise);
			fclose(txt);
			_mm_free(k);
			nifti_image_free(nimTrue);
			exit(2);
		}
		if ((nim->nx != nimNoise->nx) || (nim->ny != nimNoise->ny) || (nim->nz != nimNoise->nz)) {
			fprintf(stderr, "** Noise image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimNoise->nx, nimNoise->ny, nimNoise->nz);
			nifti_image_free(nimTrue);
			nifti_image_free(nimNoise);
			exit(1);
		}
		//Matlab script roc.m generates samples you can process with fslmaths.
		// The fslmaths text file includes two additional columns of output not described by the help documentation
		// Appears to find maximum signal in each noise volume, regardless of whether it is a hit or false alarm.
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nimNoise->nvox / nvox3D;
		if (nvol < 10)
			fprintf(stderr, "Warning: Noise images should include many volumes for estimating familywise error/\n");
		flt *imgNoise = (flt *)nimNoise->data;
		flt *mxVox = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
		for (int v = 0; v < nvol; v++) { //for each volume: find its maximum inside mask/border
			mxVox[v] = -INFINITY;
			size_t vo = v * nvox3D;
			size_t vi = 0;
			for (int z = 0; z < nim->nz; z++)
				for (int y = 0; y < nim->ny; y++)
					for (int x = 0; x < nim->nx; x++) {
						if ((imgTrue[vi] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)))
							mxVox[v] = MAX(mxVox[v], imgNoise[vo + vi]);
						vi++;
					}
		} //for each volume
		nifti_image_free(nimNoise);
		qsort(mxVox, nvol, sizeof(flt), compare);
		int idx = nTest - 1;
		flt mxNoise = mxVox[nvol - 1];
		//first: report observations more significant than any noise volume
		while ((idx >= 1) && (k[idx].val > mxNoise)) {
			tp++;
			idx--;
			if ((k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) {
				fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
				threshold = threshold - step; //delay next report
			}
		} //more significant than any noise...
		int fpThreshInt = round(fpThresh * nvol); //stop when number of false positives exceed this
		for (int i = nvol - 1; i >= 1; i--) {
			fp++; //false alarm
			while ((idx >= 1) && (k[idx].val >= mxVox[i])) {
				tp++;
				idx--;
				if ((k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) {
					fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
					threshold = threshold - step; //delay next report
				}
			} //at least as significant as current noise
			if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) {
				fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
				threshold = threshold - step; //delay next report
			}
			if (fp > fpThreshInt)
				break;
		} //inspect all tests...
		_mm_free(mxVox);
		fclose(txt); //fixed: release resources before terminating
		_mm_free(k);
		nifti_image_free(nimTrue);
		//NOTE(review): this branch always exits with status 1, even after
		// writing the output file - presumably fslmaths parity; confirm.
		exit(1);
	} else { //if noise image else infer FP/TP from input image
		int nFalse = nTest - nTrue;
		int fpThreshInt = ceil(fpThresh * nFalse); //stop when number of false positives exceed this
		for (int i = nTest - 1; i >= 1; i--) {
			if (k[i].idx == 0)
				fp++; //false alarm
			else
				tp++; //hit
			if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) {
				fprintf(txt, "%g %g %g\n", (double)fp / (double)nFalse, (double)tp / (double)nTrue, threshold);
				threshold = threshold - step; //delay next report
			}
			if (fp > fpThreshInt)
				break;
		} //inspect all tests...
	} //if noise else...
	fclose(txt);
	_mm_free(k);
	nifti_image_free(nimTrue);
	return 0;
}
//Fill interior holes in a binary mask (fslmaths -fillh / -fillh26).
// Voxels > 0 are foreground. Background connected to the volume faces is
// located with a flood fill; any background NOT reached is an enclosed hole
// and is set to 1. is26: use the 26-neighbor kernel instead of 6.
// Returns 0 on success; 1 for degenerate inputs (then only binarizes).
static int nifti_fillh(nifti_image *nim, int is26) {
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	int nvol = nim->nvox / nvox3D;
	uint8_t *vx = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64);
	memset(vx, 0, nim->nvox * sizeof(uint8_t));
	size_t n1 = 0;
	flt *f32 = (flt *)nim->data;
	for (size_t i = 0; i < nim->nvox; i++)
		if (f32[i] > 0.0) {
			n1++;
			vx[i] = 1; //binarize: 1 = foreground
		}
	if ((n1 < 1) || (nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) {
		//if fewer than 3 rows, columns or slices all voxels touch the edge:
		// only a binary threshold, not a flood fill
		for (size_t i = 0; i < nim->nvox; i++)
			f32[i] = vx[i];
		_mm_free(vx);
		return 1;
	}
	//set up kernel of linear neighbor offsets. Edge voxels are seeded
	// explicitly below, so we do not worry about A<->P and L<->R wrap
	int numk = 6;
	if (is26)
		numk = 26;
	int32_t *k = (int32_t *)_mm_malloc(numk * sizeof(int32_t), 64);
	if (is26) {
		int j = 0;
		for (int z = -1; z <= 1; z++)
			for (int y = -1; y <= 1; y++)
				for (int x = -1; x <= 1; x++) {
					//fixed: skip the center voxel - the original wrote all 27
					// offsets into a 26-element array, overflowing k[26]
					if ((x == 0) && (y == 0) && (z == 0))
						continue;
					k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny);
					j++;
				} //for x
	} else { //if 26 neighbors else 6..
		k[0] = nim->nx * nim->ny; //up
		k[1] = -k[0]; //down
		k[2] = nim->nx; //anterior
		k[3] = -k[2]; //posterior
		k[4] = 1; //left
		k[5] = -1; //right
	}
	//https://en.wikipedia.org/wiki/Flood_fill
#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		uint8_t *vxv = vx;
		vxv += (v * nvox3D); //this volume's binarized mask
		uint8_t *vxs = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64);
		memcpy(vxs, vxv, nvox3D * sizeof(uint8_t)); //dst, src
		int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //FIFO queue with untested seeds
		int qlo = 0;
		int qhi = -1; //ints always signed in C!
		//seed every background voxel on the volume faces
		size_t i = 0;
		for (int z = 0; z < nim->nz; z++) {
			int zedge = 0;
			if ((z == 0) || (z == (nim->nz - 1)))
				zedge = 1;
			for (int y = 0; y < nim->ny; y++) {
				int yedge = 0;
				if ((y == 0) || (y == (nim->ny - 1)))
					yedge = 1;
				for (int x = 0; x < nim->nx; x++) {
					if ((vxs[i] == 0) && (zedge || yedge || (x == 0) || (x == (nim->nx - 1)))) { //found new seed
						vxs[i] = 1; //do not find again
						qhi++;
						q[qhi] = i;
					} // new seed
					i++;
				} //for x
			} //y
		} //z
		//run a 'first in, first out' queue; each voxel is enqueued at most once
		while (qhi >= qlo) {
			//retire one seed, add 0..6 new ones (fillh) or 0..26 new ones (fillh26)
			for (int j = 0; j < numk; j++) {
				int jj = q[qlo] + k[j];
				if ((jj < 0) || (jj >= nvox3D))
					continue;
				if (vxs[jj] != 0)
					continue;
				//add new seed;
				vxs[jj] = 1;
				qhi++;
				q[qhi] = jj;
			}
			qlo++;
		} //while qhi >= qlo: continue until all seeds tested
		for (size_t i = 0; i < (size_t)nvox3D; i++)
			if (vxs[i] == 0)
				vxv[i] = 1; //hidden internal voxel not reached from the edges
		_mm_free(vxs);
		_mm_free(q);
	} //for each volume
	for (size_t i = 0; i < nim->nvox; i++)
		f32[i] = vx[i];
	_mm_free(vx);
	_mm_free(k);
	return 0;
}
//Sanity check the C PRNG: warn if eight consecutive rand() calls all return
// the same value, a symptom of broken hardware RDRAND.
//https://www.phoronix.com/scan.php?page=news_item&px=Linux-RdRand-Sanity-Check
static void rand_test() {
	int first = rand();
	int allSame = 1;
	for (int i = 0; i < 7; i++) {
		if (rand() != first) {
			allSame = 0;
			break;
		}
	}
	if (allSame)
		fprintf(stderr, "RDRAND gives funky output: update firmware\n");
}
//Apply a unary (single-image) operation in place: exp/log/trig/rounding,
// binarization, reciprocal, edge detection, voxel indexing, NaN handling,
// uniform/Gaussian noise, ranking, and permutation p-values.
// Returns 0 on success, 1 on unsupported datatype/operation or degenerate
// geometry. NOTE(review): the edge1 branches return 1 even after computing
// a result - confirm callers treat that as success for this op.
static int nifti_unary(nifti_image *nim, enum eOp op) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC) {
fprintf(stderr, "nifti_unary: Unsupported datatype %d\n", nim->datatype);
return 1;
}
flt *f32 = (flt *)nim->data;
if (op == exp1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = exp(f32[i]);
} else if (op == log1) {
//log of non-positive values yields 0 (fslmaths convention, avoids nan/-inf)
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] <= 0.0)
f32[i] = 0.0;
else
f32[i] = log(f32[i]);
}
} else if (op == floor1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = floor(f32[i]);
} else if (op == round1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = round(f32[i]);
} else if (op == ceil1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = ceil(f32[i]);
} else if (op == trunc1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = trunc(f32[i]);
} else if (op == sin1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = sin(f32[i]);
} else if (op == cos1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = cos(f32[i]);
} else if (op == tan1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = tan(f32[i]);
} else if (op == asin1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = asin(f32[i]);
} else if (op == acos1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = acos(f32[i]);
} else if (op == atan1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = atan(f32[i]);
} else if (op == sqr1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = f32[i] * f32[i]; //<- pow(a,x) uses flt for x
} else if (op == sqrt1) {
nifti_sqrt(f32, nim->nvox);
} else if (op == recip1) { //https://stackoverflow.com/questions/10606483/sse-reciprocal-if-not-zero
//reciprocal; zero voxels are left as zero
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] == 0.0f)
continue;
f32[i] = 1.0 / f32[i];
}
} else if (op == abs1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = fabs(f32[i]);
} else if (op == bin1) {
//binarize: voxel > 0 becomes 1, else 0
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] > 0)
f32[i] = 1.0f;
else
f32[i] = 0.0f;
}
} else if (op == binv1) {
//inverted binarize: voxel > 0 becomes 0, else 1
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] > 0)
f32[i] = 0.0f;
else
f32[i] = 1.0f;
}
} else if (op == edge1) {
//gradient-magnitude edge detector scaled by voxel dimensions
if ((nim->dx == 0.0) || (nim->dy == 0.0) || (nim->dz == 0.0)) {
fprintf(stderr, "edge requires non-zero pixdim1/pixdim2/pixdim3\n");
return 1;
}
flt xscl = 1.0 / (sqr(nim->dx));
flt yscl = 1.0 / (sqr(nim->dy));
flt zscl = 1.0 / (sqr(nim->dz));
flt xyzscl = 1.0 / (2.0 * sqrt(xscl + yscl + zscl));
if (nim->dim[3] < 2) { //no slices 'above' or 'below' for 2D
size_t nxy = nim->nx * nim->ny; //slice increment
int nvol = nim->nvox / nxy;
if ((nvol * nxy) != nim->nvox)
return 1;
#pragma omp parallel for
for (int v = 0; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0)
flt *inp = (flt *)_mm_malloc(nxy * sizeof(flt), 64);
flt *o32 = (flt *)f32;
o32 += v * nxy;
memcpy(inp, o32, nxy * sizeof(flt)); //dst, src: snapshot so writes don't feed reads
for (int y = 1; (y < (nim->ny - 1)); y++) {
size_t yo = y * nim->nx;
for (int x = 1; (x < (nim->nx - 1)); x++) {
size_t vx = yo + x;
flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl;
flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl;
o32[vx] = sqrt(xv + yv) * xyzscl;
} //x
} //y
_mm_free(inp);
} //for v
return 1;
} //edge for 2D volume(s)
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
size_t nxy = nim->nx * nim->ny; //slice increment
#pragma omp parallel for
for (int v = 0; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0)
flt *inp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
flt *o32 = (flt *)f32;
o32 += v * nvox3D;
memcpy(inp, o32, nvox3D * sizeof(flt)); //dst, src: snapshot so writes don't feed reads
for (int z = 1; (z < (nim->nz - 1)); z++) {
size_t zo = z * nxy;
for (int y = 1; (y < (nim->ny - 1)); y++) {
size_t yo = y * nim->nx;
for (int x = 1; (x < (nim->nx - 1)); x++) {
size_t vx = zo + yo + x;
flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl;
flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl;
flt zv = sqr(inp[vx + nxy] - inp[vx - nxy]) * zscl;
o32[vx] = sqrt(xv + yv + zv) * xyzscl;
} //x
} //y
} //z
_mm_free(inp);
} //for v
return 1; //edge for 3D volume(s)
} else if (op == index1) {
//replace each non-zero voxel with a sequential index
//nb FSLmaths flips dim[1] depending on determinant
size_t idx = 0;
if (!neg_determ(nim)) { //flip x
size_t nyzt = nim->nvox / nim->nx;
if ((nyzt * nim->nx) != nim->nvox)
return 1;
for (size_t i = 0; i < nyzt; i++) {
size_t row = i * nim->nx;
;
int x = nim->nx;
while (x > 0) {
x--;
if (f32[row + x] != 0)
f32[row + x] = idx++;
} //for each column (x)
} //for each row (yzt)
} else //don't flip x
for (size_t i = 0; i < nim->nvox; i++)
if (f32[i] != 0)
f32[i] = idx++;
} else if (op == nan1) {
//replace NaN voxels with 0
for (size_t i = 0; i < nim->nvox; i++)
if (isnan(f32[i]))
f32[i] = 0.0;
} else if (op == nanm1) {
//NaN mask: 1 where NaN, 0 elsewhere
for (size_t i = 0; i < nim->nvox; i++)
if (isnan(f32[i]))
f32[i] = 1.0;
else
f32[i] = 0.0;
} else if (op == rand1) {
//add uniform noise in [0,1]
rand_test();
flt scl = (1.0 / RAND_MAX);
for (size_t i = 0; i < nim->nvox; i++)
f32[i] += rand() * scl;
} else if (op == randn1) {
//add Gaussian noise (mean 0, sd 1) via the Box-Muller transform,
// which produces two independent normals per pair of uniforms
rand_test();
//https://en.wikipedia.org/wiki/Box–Muller_transform
//for SIMD see https://github.com/miloyip/normaldist-benchmark
static const flt sigma = 1.0f;
static const flt mu = 0.0;
//static const flt epsilon = FLT_EPSILON;
static const flt two_pi = 2.0 * 3.14159265358979323846;
static const flt scl = (1.0 / RAND_MAX);
//fill pairs
for (size_t i = 0; i < (nim->nvox - 1); i += 2) {
flt u1, u2;
do {
u1 = rand() * scl;
u2 = rand() * scl;
} while (u1 <= epsilon); //log(0) is undefined
flt su1 = sqrt(-2.0 * log(u1));
flt z0 = su1 * cos(two_pi * u2);
flt z1 = su1 * sin(two_pi * u2);
f32[i] += z0 * sigma + mu;
f32[i + 1] += z1 * sigma + mu;
}
//if odd, fill final voxel
if (nim->nvox % 2 != 0) {
flt u1, u2;
do {
u1 = rand() * scl;
u2 = rand() * scl;
} while (u1 <= epsilon);
flt z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2);
f32[nim->nvox - 1] += z0 * sigma + mu;
}
} else if (op == range1) {
//store the intensity range in the header's cal_min/cal_max (voxels untouched)
flt mn = f32[0];
flt mx = mn;
for (size_t i = 0; i < nim->nvox; i++) {
mn = fmin(f32[i], mn);
mx = fmax(f32[i], mx);
}
nim->cal_min = mn;
nim->cal_max = mx;
} else if (op == rank1) {
//temporal rank: each voxel becomes its 1-based rank across volumes
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
//you are always first if you are the only one to show up...
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = 1;
} else {
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
//how do we handle ties?
struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64);
size_t j = i;
for (int v = 0; v < nvol; v++) {
k[v].val = f32[j];
k[v].idx = j;
j += nvox3D;
}
int varies = 0;
for (int v = 0; v < nvol; v++) {
if (k[v].val != k[0].val) {
varies = 1;
break;
}
}
if (varies) {
qsort(k, nvol, sizeof(struct sortIdx), compare);
for (int v = 0; v < nvol; v++)
f32[k[v].idx] = v + 1;
} else {
//all volumes identical: assign ranks in temporal order
j = i;
for (int v = 0; v < nvol; v++) {
f32[j] = v + 1;
j += nvox3D;
}
}
_mm_free(k);
} //for i
} //nvol > 1
} else if ((op == rank1) || (op == ranknorm1)) {
//NOTE(review): rank1 is fully handled by the branch above, so this
// branch is only ever reached for ranknorm1 - the rank1 test here is
// dead; confirm intent before simplifying.
//rank-normalize each voxel across volumes to a Gaussian with the
// voxel's observed mean and standard deviation
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
//you are always first if you are the only one to show up...
for (int i = 0; i < nim->nvox; i++)
f32[i] = 0;
} else {
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64);
size_t j = i;
double sum = 0.0;
for (int v = 0; v < nvol; v++) {
k[v].val = f32[j];
sum += k[v].val;
k[v].idx = j;
j += nvox3D;
}
double mean = sum / nvol;
double sumSqr = 0.0;
for (int v = 0; v < nvol; v++)
sumSqr += sqr(k[v].val - mean);
double stdev = sqrt(sumSqr / (nvol - 1));
qsort(k, nvol, sizeof(struct sortIdx), compare);
//strange formula, but replicates fslmaths, consider nvol=3 rank[2,0,1] will be pval [2.5/3, 1.5/3, 0.5/3]
for (int v = 0; v < nvol; v++)
f32[k[v].idx] = (stdev * -qginv((double)(v + 0.5) / (double)nvol)) + mean;
_mm_free(k);
} //for i
} //nvol > 1
//double qginv( double p )
} else if (op == ztop1) {
//z-score to p-value
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = qg(f32[i]);
} else if (op == ptoz1) {
//given p, return x such that Q(x)=p, for 0 < p < 1
// #ifdef DT32
const flt kNaN = NAN;
//const flt kNaN = 0.0 / 0.0;
for (size_t i = 0; i < nim->nvox; i++) {
if ((f32[i] < 0.0) || (f32[i] > 1.0))
f32[i] = kNaN; //p outside [0,1] has no z equivalent
else
f32[i] = qginv(f32[i]);
}
} else if ((op == pval1) || (op == pval01)) {
//permutation p-value: volume 1 is the observation, remaining volumes
// are permutations; output is a single 3D volume
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
fprintf(stderr, "permutation tests require 4D datasets.\n");
return 1;
}
void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
flt *o32 = (flt *)dat;
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
size_t vi = i;
flt obs = f32[vi]; //observed value - see if it is extreme relative to permutations
int nNotZero = 0;
int nGreater = 0;
int nEqual = 0; //observation in first volume
flt f32v0 = f32[vi];
for (int v = 0; v < nvol; v++) {
if (f32[vi] != 0)
nNotZero++;
if (f32[vi] == f32v0)
nEqual++;
if (f32[vi] >= obs)
nGreater++;
vi += nvox3D;
}
if (op == pval1) {
//if (nEqual == nvol)
//	o32[i] = 0.0;
//else
o32[i] = (double)nGreater / (double)nvol;
} else {
//pval01: restrict denominator to non-zero permutations
if (nEqual == nvol)
o32[i] = 0.0;
else if (obs == 0)
o32[i] = 1.0;
else //nZero must be at least 1: the observed data is not zero
o32[i] = (double)nGreater / (double)(nNotZero);
}
} //for i
//collapse header from 4D to 3D and swap in the new buffer
nim->nvox = nvox3D;
nim->ndim = 3;
nim->nt = 1;
nim->dim[0] = 3;
nim->dim[4] = 1;
free(nim->data);
nim->data = dat;
} else if (op == cpval1) {
//corrected (familywise) p-value: compare each observed voxel against
// the maximum of every permutation volume
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
fprintf(stderr, "permutation tests require 4D datasets.\n");
return 1;
}
void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
flt *o32 = (flt *)dat;
flt *vmax = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
#pragma omp parallel for
for (int v = 1; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0)
size_t vo = v * nvox3D;
flt mx = f32[vo];
for (int i = 0; i < nvox3D; i++)
mx = MAX(mx, f32[vo + i]);
vmax[v] = mx;
//printf("%d %g\n", v, mx);
}
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
flt obs = f32[i]; //observed value - see if it is extreme relative to permutations
int nGreater = 1; //count observation
for (int v = 1; v < nvol; v++)
if (vmax[v] >= obs)
nGreater++;
o32[i] = (double)nGreater / (double)nvol;
} //for i
_mm_free(vmax);
//collapse header from 4D to 3D and swap in the new buffer
nim->nvox = nvox3D;
nim->ndim = 3;
nim->nt = 1;
nim->dim[0] = 3;
nim->dim[4] = 1;
free(nim->data);
nim->data = dat;
} else {
fprintf(stderr, "nifti_unary: Unsupported operation\n");
return 1;
}
return 0;
} //nifti_unary()
static int nifti_thrp(nifti_image *nim, double v, enum eOp op) {
	// Threshold (or clamp) the image at a given percentage of its ROBUST RANGE.
	//  -thrp  : zero voxels below v% of the robust range (all voxels)
	//  -thrP  : as -thrp, but robust range estimated from non-zero voxels only
	//  -uthrp : zero voxels above v% of the robust range (all voxels)
	//  -uthrP : as -uthrp, but robust range estimated from non-zero voxels only
	//  clamp/uclamp: replace out-of-range voxels with the threshold itself
	// Returns 0 on success, 1 on bad input or robust-range failure.
	if ((v < 0.0) || (v > 100.0)) {
		fprintf(stderr, "nifti_thrp: threshold should be between 0..100\n");
		return 1;
	}
	int nonZeroOnly = ((op == thrP) || (op == uthrP)) ? 1 : 0;
	flt lo, hi; // robust 2nd..98th percentile bounds
	if (nifti_robust_range(nim, &lo, &hi, nonZeroOnly) != 0)
		return 1;
	// linear interpolation of the requested percentage into the robust range
	flt cutoff = lo + ((v / 100.0) * (hi - lo));
	flt replacement = ((op == clamp) || (op == uclamp)) ? cutoff : 0.0;
	int aboveNotBelow = ((op == uthrp) || (op == uthrP) || (op == uclamp)) ? 1 : 0;
	nifti_thr(nim, cutoff, aboveNotBelow, replacement);
	return 0;
} //nifti_thrp()
#ifdef DT32
int main32(int argc, char *argv[]) {
#else
int main64(int argc, char *argv[]) {
	printf("beta: Using 64-bit calc\n");
#endif
	// fslmaths-compatible driver: parse optional '-dt', the input image, a chain
	// of operations, the output image and optional '-odt'; apply each operation
	// in sequence to the in-memory NIfTI image, then write the result.
	// Returns 0 on success, non-zero on any parse/processing error.
	char *fin = NULL, *fout = NULL;
	//fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths)
	//fslmsths in.nii -rem 10 out.nii uses integer modulus not fmod
	//fslmaths robust range not fully described, this emulation is close
	//fslmaths ing/inm are listed as "unary" but should be listed as binary
	if (argc < 3)
		return show_helpx(); //minimal command has input and output: "niimath in.nii out.nii"
	int dtCalc = DT_FLOAT32; //data type for calculation
	int dtOut = DT_FLOAT32; //data type for output
	int ac = 1;
	// '-dt' sets datatype for calculations
	if (!strcmp(argv[ac], "-dt")) {
		if (!strcmp(argv[ac + 1], "double")) {
			dtCalc = DT_FLOAT64;
		} else if (strcmp(argv[ac + 1], "float")) {
			fprintf(stderr, "'-dt' error: only float or double calculations supported\n");
			return 1;
		}
		ac += 2;
		if (argc < (ac + 2))
			return 1; //insufficient arguments remain
	}
	//special case: pass through
	// no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz"
	// note fslmaths would save as flt type... but lossless conversion in native format is faster
	// note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max
	if (ac + 2 == argc) {
		fin = argv[ac]; // no string copy, just pointer assignment
		ac++;
		nifti_image *nim = nifti_image_read(fin, 1);
		if (!nim) { //previously unchecked: a bad input name crashed nifti_set_filenames
			fprintf(stderr, "** failed to read NIfTI image from '%s'\n", fin);
			return 2;
		}
		fout = argv[ac]; // no string copy, just pointer assignment
		ac++;
		if (nifti_set_filenames(nim, fout, 0, 1))
			return 1;
		nifti_save(nim, ""); //nifti_image_write( nim );
		nifti_image_free(nim);
		return 0;
	} //end pass through
	// next argument is input file
	fin = argv[ac]; // no string copy, just pointer assignment
	ac++;
	//clock_t startTime = clock();
	nifti_image *nim = nifti_image_read2(fin, 1);
	if (!nim) {
		fprintf(stderr, "** failed to read NIfTI image from '%s'\n", fin);
		return 2;
	}
	//printf("read time: %ld ms\n", timediff(startTime, clock()));
	in_hdr ihdr = set_input_hdr(nim); //remember input datatype for '-odt input'
	int nkernel = 0; //number of voxels in kernel
	int *kernel = make_kernel(nim, &nkernel, 3, 3, 3); //default 3x3x3 kernel
	//check for "-odt" must be last couplet
	if (!strcmp(argv[argc - 2], "-odt")) {
		if (!strcmp(argv[argc - 1], "double")) {
			dtOut = DT_FLOAT64;
		} else if (!strcmp(argv[argc - 1], "flt")) {
			dtOut = DT_FLOAT32;
		} else if (!strcmp(argv[argc - 1], "int")) {
			dtOut = DT_INT32;
		} else if (!strcmp(argv[argc - 1], "short")) {
			dtOut = DT_INT16;
		} else if (!strcmp(argv[argc - 1], "ushort")) {
			dtOut = DT_UINT16;
		} else if (!strcmp(argv[argc - 1], "char")) {
			dtOut = DT_UINT8;
		} else if (!strcmp(argv[argc - 1], "input")) {
			dtOut = nim->datatype; //ihdr.datatype; //!
		} else {
			fprintf(stderr, "Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc - 1]);
			return 2;
		}
		argc = argc - 2; //consume the '-odt <type>' couplet
	} //odt
	//convert data to calculation type (-dt)
	if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0)
		return 1;
	//check output filename, e.g does file exist
	fout = argv[argc - 1]; // no string copy, just pointer assignment
	if (nifti_set_filenames(nim, fout, 0, 1))
		return 1;
	argc = argc - 1; //consume output filename
	#if defined(_OPENMP)
	const int maxNumThreads = omp_get_max_threads();
	const char *key = "AFNI_COMPRESSOR";
	char *value;
	value = getenv(key);
	//export AFNI_COMPRESSOR=PIGZ
	char pigzKey[5] = "PIGZ";
	if ((value != NULL) && (strstr(value, pigzKey))) {
		omp_set_num_threads(maxNumThreads);
		fprintf(stderr, "Using %d threads\n", maxNumThreads);
	} else {
		omp_set_num_threads(1);
		fprintf(stderr, "Single threaded\n");
	}
	#endif
	//read operations
	char *end;
	int ok = 0;
	while (ac < argc) {
		enum eOp op = unknown;
		if (!strcmp(argv[ac], "-add"))
			op = add;
		if (!strcmp(argv[ac], "-sub"))
			op = sub;
		if (!strcmp(argv[ac], "-mul"))
			op = mul;
		if (!strcmp(argv[ac], "-div"))
			op = divX;
		if (!strcmp(argv[ac], "-rem"))
			op = rem;
		if (!strcmp(argv[ac], "-mod"))
			op = mod;
		if (!strcmp(argv[ac], "-mas"))
			op = mas;
		if (!strcmp(argv[ac], "-thr"))
			op = thr;
		if (!strcmp(argv[ac], "-thrp"))
			op = thrp;
		if (!strcmp(argv[ac], "-thrP"))
			op = thrP;
		if (!strcmp(argv[ac], "-uthr"))
			op = uthr;
		if (!strcmp(argv[ac], "-uthrp"))
			op = uthrp;
		if (!strcmp(argv[ac], "-uthrP"))
			op = uthrP;
		if (!strcmp(argv[ac], "-clamp"))
			op = clamp;
		if (!strcmp(argv[ac], "-uclamp"))
			op = uclamp;
		if (!strcmp(argv[ac], "-max")) //duplicate '-max' test removed
			op = max;
		if (!strcmp(argv[ac], "-min"))
			op = min;
		//if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas
		//if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas
		//historic form accepted "power" with no dash; keep it for compatibility
		if ((!strcmp(argv[ac], "power")) || (!strcmp(argv[ac], "-power")))
			op = power;
		if (!strcmp(argv[ac], "-seed"))
			op = seed;
		//if ( ! strcmp(argv[ac], "-restart") ) op = restart;
		//if ( ! strcmp(argv[ac], "-save") ) op = save;
		if (!strcmp(argv[ac], "-inm"))
			op = inm;
		if (!strcmp(argv[ac], "-ing"))
			op = ing;
		if (!strcmp(argv[ac], "-s"))
			op = smth;
		if (!strcmp(argv[ac], "-exp"))
			op = exp1;
		if (!strcmp(argv[ac], "-ceil"))
			op = ceil1;
		if (!strcmp(argv[ac], "-round")) //NOTE(review): maps to ceil1, not a round op - confirm intended
			op = ceil1;
		if (!strcmp(argv[ac], "-floor"))
			op = floor1;
		if (!strcmp(argv[ac], "-trunc"))
			op = trunc1;
		if (!strcmp(argv[ac], "-log"))
			op = log1;
		if (!strcmp(argv[ac], "-sin"))
			op = sin1;
		if (!strcmp(argv[ac], "-cos"))
			op = cos1;
		if (!strcmp(argv[ac], "-tan"))
			op = tan1;
		if (!strcmp(argv[ac], "-asin"))
			op = asin1;
		if (!strcmp(argv[ac], "-acos"))
			op = acos1;
		if (!strcmp(argv[ac], "-atan"))
			op = atan1;
		if (!strcmp(argv[ac], "-sqr"))
			op = sqr1;
		if (!strcmp(argv[ac], "-sqrt"))
			op = sqrt1;
		if (!strcmp(argv[ac], "-recip"))
			op = recip1;
		if (!strcmp(argv[ac], "-abs"))
			op = abs1;
		if (!strcmp(argv[ac], "-bin"))
			op = bin1;
		if (!strcmp(argv[ac], "-binv"))
			op = binv1;
		if (!strcmp(argv[ac], "-edge"))
			op = edge1;
		if (!strcmp(argv[ac], "-index"))
			op = index1;
		if (!strcmp(argv[ac], "-nan"))
			op = nan1;
		if (!strcmp(argv[ac], "-nanm"))
			op = nanm1;
		if (!strcmp(argv[ac], "-rand"))
			op = rand1;
		if (!strcmp(argv[ac], "-randn"))
			op = randn1;
		if (!strcmp(argv[ac], "-range"))
			op = range1;
		if (!strcmp(argv[ac], "-rank"))
			op = rank1;
		if (!strcmp(argv[ac], "-ranknorm"))
			op = ranknorm1;
		if (!strcmp(argv[ac], "-ztop"))
			op = ztop1;
		if (!strcmp(argv[ac], "-ptoz"))
			op = ptoz1;
		if (!strcmp(argv[ac], "-pval"))
			op = pval1;
		if (!strcmp(argv[ac], "-pval0"))
			op = pval01;
		if (!strcmp(argv[ac], "-cpval"))
			op = cpval1;
		//kernel operations
		if (!strcmp(argv[ac], "-dilM"))
			op = dilMk;
		if (!strcmp(argv[ac], "-dilD"))
			op = dilDk;
		if (!strcmp(argv[ac], "-dilF"))
			op = dilFk;
		if (!strcmp(argv[ac], "-dilall"))
			op = dilallk;
		if (!strcmp(argv[ac], "-ero"))
			op = erok;
		if (!strcmp(argv[ac], "-eroF"))
			op = eroFk;
		if (!strcmp(argv[ac], "-fmedian"))
			op = fmediank;
		if (!strcmp(argv[ac], "-fmean"))
			op = fmeank;
		if (!strcmp(argv[ac], "-fmeanu"))
			op = fmeanuk;
		if (!strcmp(argv[ac], "-p")) { //set thread count (OpenMP builds only)
			ac++;
			#if defined(_OPENMP)
			int nProcessors = atoi(argv[ac]);
			if (nProcessors < 1) {
				omp_set_num_threads(maxNumThreads);
				fprintf(stderr, "Using %d threads\n", maxNumThreads);
			} else {
				omp_set_num_threads(nProcessors);
				printf("Using %d threads\n", nProcessors);
			}
			#else
			fprintf(stderr, "Warning: not compiled for OpenMP: '-p' ignored\n");
			#endif
		} else
		//All Dimensionality reduction operations names begin with Capital letter, no other commands do!
		if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper
			int dim = 0; //1..4 = X,Y,Z,T axis to collapse
			switch (argv[ac][1]) {
			case 'X':
				dim = 1;
				break;
			case 'Y':
				dim = 2;
				break;
			case 'Z':
				dim = 3;
				break;
			case 'T':
				dim = 4;
				break;
			}
			if (dim == 0) {
				fprintf(stderr, "Error: unknown dimensionality reduction operation: %s\n", argv[ac]);
				goto fail;
			}
			if (strstr(argv[ac], "mean"))
				ok = nifti_dim_reduce(nim, Tmean, dim, 0);
			else if (strstr(argv[ac], "std"))
				ok = nifti_dim_reduce(nim, Tstd, dim, 0);
			else if (strstr(argv[ac], "maxn"))
				ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max
			else if (strstr(argv[ac], "max"))
				ok = nifti_dim_reduce(nim, Tmax, dim, 0);
			else if (strstr(argv[ac], "min"))
				ok = nifti_dim_reduce(nim, Tmin, dim, 0);
			else if (strstr(argv[ac], "median"))
				ok = nifti_dim_reduce(nim, Tmedian, dim, 0);
			else if (strstr(argv[ac], "perc")) {
				ac++;
				int pct = atoi(argv[ac]);
				ok = nifti_dim_reduce(nim, Tperc, dim, pct);
			} else if (strstr(argv[ac], "ar1"))
				ok = nifti_dim_reduce(nim, Tar1, dim, 0);
			else {
				fprintf(stderr, "Error unknown dimensionality reduction operation: %s\n", argv[ac]);
				ok = 1;
			}
		} else if (!strcmp(argv[ac], "-roi")) {
			//<xmin> <xsize> <ymin> <ysize> <zmin> <zsize> <tmin> <tsize>
			if ((argc - ac) < 8) {
				fprintf(stderr, "not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes
				goto fail;
			}
			ac++;
			int xmin = atoi(argv[ac]);
			ac++;
			int xsize = atoi(argv[ac]);
			ac++;
			int ymin = atoi(argv[ac]);
			ac++;
			int ysize = atoi(argv[ac]);
			ac++;
			int zmin = atoi(argv[ac]);
			ac++;
			int zsize = atoi(argv[ac]);
			ac++;
			int tmin = atoi(argv[ac]);
			ac++;
			int tsize = atoi(argv[ac]);
			nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize);
		} else if (!strcmp(argv[ac], "-bptfm")) {
			ac++;
			double hp_sigma = strtod(argv[ac], &end);
			ac++;
			double lp_sigma = strtod(argv[ac], &end);
			ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0);
		} else if (!strcmp(argv[ac], "-bptf")) {
			ac++;
			double hp_sigma = strtod(argv[ac], &end);
			ac++;
			double lp_sigma = strtod(argv[ac], &end);
			ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1);
		#ifdef bandpass
		} else if (!strcmp(argv[ac], "-bandpass")) {
			// niimath test4D -bandpass 0.08 0.008 0 c
			ac++;
			double lp_hz = strtod(argv[ac], &end);
			ac++;
			double hp_hz = strtod(argv[ac], &end);
			ac++;
			double TRsec = strtod(argv[ac], &end);
			ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec);
		#endif
		} else if (!strcmp(argv[ac], "-roc")) {
			//-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth>
			ac++;
			double thresh = strtod(argv[ac], &end);
			ac++;
			int outfile = ac;
			char *fnoise = NULL;
			if (thresh > 0.0) { //positive threshold: separate noise-only image supplied
				ac++;
				fnoise = argv[ac];
			}
			ac++;
			int truth = ac;
			//bounds check BEFORE dereferencing argv[truth] (was checked after the call)
			if (ac >= argc) {
				fprintf(stderr, "Error: no output filename specified!\n"); //e.g. volume size might differ
				goto fail;
			}
			ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]);
		} else if (!strcmp(argv[ac], "-unsharp")) {
			ac++;
			double sigma = strtod(argv[ac], &end);
			ac++;
			double amount = strtod(argv[ac], &end);
			nifti_unsharp(nim, sigma, sigma, sigma, amount);
		} else if (!strcmp(argv[ac], "-otsu"))
			ok = nifti_otsu(nim, 0);
		else if (!strcmp(argv[ac], "-otsu0"))
			ok = nifti_otsu(nim, 1);
		else if (!strcmp(argv[ac], "-subsamp2"))
			ok = nifti_subsamp2(nim, 0);
		else if (!strcmp(argv[ac], "-subsamp2offc"))
			ok = nifti_subsamp2(nim, 1);
		else if (!strcmp(argv[ac], "-sobel_binary"))
			ok = nifti_sobel(nim, 1, 1);
		else if (!strcmp(argv[ac], "-sobel"))
			ok = nifti_sobel(nim, 1, 0);
		else if (!strcmp(argv[ac], "-demean"))
			ok = nifti_demean(nim);
		else if (!strcmp(argv[ac], "-detrend"))
			ok = nifti_detrend_linear(nim);
		else if (!strcmp(argv[ac], "-resize")) {
			ac++;
			double X = strtod(argv[ac], &end);
			ac++;
			double Y = strtod(argv[ac], &end);
			ac++;
			double Z = strtod(argv[ac], &end);
			ac++;
			int interp_method = atoi(argv[ac]);
			ok = nifti_resize(nim, X, Y, Z, interp_method);
		} else if (!strcmp(argv[ac], "-crop")) {
			ac++;
			int tmin = atoi(argv[ac]);
			ac++;
			int tsize = atoi(argv[ac]);
			ok = nifti_crop(nim, tmin, tsize);
		} else if (!strcmp(argv[ac], "--compare")) { //--function terminates without saving image
			ac++;
			nifti_compare(nim, argv[ac]); //always terminates
		} else if (!strcmp(argv[ac], "-edt"))
			ok = nifti_edt(nim);
		else if (!strcmp(argv[ac], "-fillh"))
			ok = nifti_fillh(nim, 0);
		else if (!strcmp(argv[ac], "-fillh26"))
			ok = nifti_fillh(nim, 1);
		else if (!strcmp(argv[ac], "-kernel")) {
			//replace current kernel with one of the named shapes
			ac++;
			if (kernel != NULL)
				_mm_free(kernel);
			kernel = NULL;
			if (!strcmp(argv[ac], "3D"))
				kernel = make_kernel(nim, &nkernel, 3, 3, 3);
			if (!strcmp(argv[ac], "2D"))
				kernel = make_kernel(nim, &nkernel, 3, 3, 1);
			if (!strcmp(argv[ac], "boxv")) {
				ac++;
				int vx = atoi(argv[ac]);
				kernel = make_kernel(nim, &nkernel, vx, vx, vx);
			}
			if (!strcmp(argv[ac], "sphere")) {
				ac++;
				double mm = strtod(argv[ac], &end);
				kernel = make_kernel_sphere(nim, &nkernel, mm);
			}
			if (!strcmp(argv[ac], "file")) {
				ac++;
				kernel = make_kernel_file(nim, &nkernel, argv[ac]);
			}
			if (!strcmp(argv[ac], "gauss")) {
				ac++;
				double mm = strtod(argv[ac], &end);
				kernel = make_kernel_gauss(nim, &nkernel, mm);
			}
			if (!strcmp(argv[ac], "box")) { //all voxels in a cube of width <size> mm centered on target voxel");
				ac++;
				double mm = strtod(argv[ac], &end);
				int vx = (2 * floor(mm / nim->dx)) + 1;
				int vy = (2 * floor(mm / nim->dy)) + 1;
				int vz = (2 * floor(mm / nim->dz)) + 1;
				kernel = make_kernel(nim, &nkernel, vx, vy, vz);
			}
			if (!strcmp(argv[ac], "boxv3")) {
				ac++;
				int vx = atoi(argv[ac]);
				ac++;
				int vy = atoi(argv[ac]);
				ac++;
				int vz = atoi(argv[ac]);
				kernel = make_kernel(nim, &nkernel, vx, vy, vz);
			}
			if (kernel == NULL) {
				fprintf(stderr, "Error: '-kernel' option failed.\n"); //e.g. volume size might differ
				ok = 1;
			}
		} else if (!strcmp(argv[ac], "-tensor_2lower")) {
			ok = nifti_tensor_2(nim, 0);
		} else if (!strcmp(argv[ac], "-tensor_2upper")) {
			ok = nifti_tensor_2(nim, 1);
		} else if (!strcmp(argv[ac], "-tensor_decomp")) {
			ok = nifti_tensor_decomp(nim, 1);
		} else if (!strcmp(argv[ac], "-tensor_decomp_lower")) {
			ok = nifti_tensor_decomp(nim, 0);
		} else if (!strcmp(argv[ac], "-slicetimer")) {
			#ifdef slicetimer
			ok = nifti_slicetimer(nim);
			#else
			fprintf(stderr, "Recompile to support slice timer\n"); //e.g. volume size might differ
			ok = 1;
			#endif
		} else if (!strcmp(argv[ac], "-save")) {
			ac++;
			char *fout2 = argv[ac];
			if (nifti_set_filenames(nim, fout2, 1, 1))
				ok = 1;
			else {
				nifti_save(nim, ""); //nifti_image_write( nim );
				nifti_set_filenames(nim, fout, 1, 1); //restore final output name
			}
		} else if (!strcmp(argv[ac], "-restart")) {
			if (kernel != NULL)
				fprintf(stderr, "Warning: 'restart' resets the kernel\n");
			nifti_image_free(nim);
			if (kernel != NULL)
				_mm_free(kernel);
			kernel = NULL;
			ac++;
			nim = nifti_image_read(argv[ac], 1);
			if (!nim)
				ok = 1; //error
			else //rebuild default kernel from the NEW image (was built from the freed one)
				kernel = make_kernel(nim, &nkernel, 3, 3, 3);
		} else if (!strcmp(argv[ac], "-grid")) {
			ac++;
			double v = strtod(argv[ac], &end);
			ac++;
			int s = atoi(argv[ac]);
			ok = nifti_grid(nim, v, s);
		} else if (!strcmp(argv[ac], "-dog1")) {
			ac++;
			double pos = strtod(argv[ac], &end);
			ac++;
			double neg = strtod(argv[ac], &end);
			ok = nifti_dog(nim, pos, neg, 1);
		} else if (!strcmp(argv[ac], "-dog2")) {
			ac++;
			double pos = strtod(argv[ac], &end);
			ac++;
			double neg = strtod(argv[ac], &end);
			ok = nifti_dog(nim, pos, neg, 2);
		} else if (!strcmp(argv[ac], "-dog")) {
			ac++;
			double pos = strtod(argv[ac], &end);
			ac++;
			double neg = strtod(argv[ac], &end);
			ok = nifti_dog(nim, pos, neg, 0);
		} else if (!strcmp(argv[ac], "-tfce")) {
			ac++;
			double H = strtod(argv[ac], &end);
			ac++;
			double E = strtod(argv[ac], &end);
			ac++;
			int c = atoi(argv[ac]);
			ok = nifti_tfce(nim, H, E, c);
		} else if (!strcmp(argv[ac], "-tfceS")) {
			ac++;
			double H = strtod(argv[ac], &end);
			ac++;
			double E = strtod(argv[ac], &end);
			ac++;
			int c = atoi(argv[ac]);
			ac++;
			int x = atoi(argv[ac]);
			ac++;
			int y = atoi(argv[ac]);
			ac++;
			int z = atoi(argv[ac]);
			ac++;
			double tfce_thresh = strtod(argv[ac], &end);
			ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh);
		} else if (op == unknown) {
			fprintf(stderr, "!!Error: unsupported operation '%s'\n", argv[ac]);
			goto fail;
		}
		//dispatch ops that were only recorded (not handled inline) above
		if ((op >= dilMk) && (op <= fmeanuk))
			ok = nifti_kernel(nim, op, kernel, nkernel);
		if ((op >= exp1) && (op <= ptoz1))
			nifti_unary(nim, op);
		if ((op >= add) && (op < exp1)) { //binary operations
			ac++;
			if (ac >= argc) { //argv[argc] is NULL: guard strtod against a missing operand
				fprintf(stderr, "Error: '%s' expects an argument\n", argv[ac - 1]);
				goto fail;
			}
			double v = strtod(argv[ac], &end);
			if (strlen(argv[ac]) != (end - argv[ac])) { // "4d" will return numeric "4"
				if ((op == power) || (op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed)) {
					fprintf(stderr, "Error: '%s' expects numeric value\n", argv[ac - 1]);
					goto fail;
				} else //operand is an image, not a number
					ok = nifti_binary(nim, argv[ac], op);
			} else {
				if (op == add)
					ok = nifti_rescale(nim, 1.0, v);
				if (op == sub)
					ok = nifti_rescale(nim, 1.0, -v);
				if (op == mul)
					ok = nifti_rescale(nim, v, 0.0);
				if (op == divX)
					ok = nifti_rescale(nim, 1.0 / v, 0.0);
				if (op == mod)
					ok = nifti_rem(nim, v, 1);
				if (op == rem)
					ok = nifti_rem(nim, v, 0);
				if (op == mas) {
					fprintf(stderr, "Error: -mas expects image not number\n");
					goto fail;
				}
				if (op == power)
					ok = nifti_binary_power(nim, v);
				if (op == thr)
					ok = nifti_thr(nim, v, 0, 0.0);
				if ((op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP))
					ok = nifti_thrp(nim, v, op);
				if (op == uthr)
					ok = nifti_thr(nim, v, 1, 0.0);
				if (op == max)
					ok = nifti_max(nim, v, 0);
				if (op == min)
					ok = nifti_max(nim, v, 1);
				if (op == inm)
					ok = nifti_inm(nim, v);
				if (op == ing)
					ok = nifti_ing(nim, v);
				if (op == smth)
					ok = nifti_smooth_gauss(nim, v, v, v, -6.0);
				if (op == seed) {
					if ((v > 0) && (v < 1)) //fractional seed scaled into RAND_MAX range
						v *= RAND_MAX;
					srand((unsigned)fabs(v));
				}
			}
		} //binary operations
		if (ok != 0)
			goto fail;
		ac++;
	}
	//convert data to output type (-odt)
	if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0)
		return 1;
	// if we get here, write the output dataset
	//startTime = clock();
	nifti_save(nim, ""); //nifti_image_write( nim );
	//printf("write time: %ld ms\n", timediff(startTime, clock()));
	// and clean up memory
	nifti_image_free(nim);
	if (kernel != NULL)
		_mm_free(kernel);
	return 0;
fail:
	nifti_image_free(nim);
	if (kernel != NULL)
		_mm_free(kernel);
	return 1;
} //main()
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "api/libperf.h"
#include "lib/libperf_int.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
# include <mpi.h>
#elif HAVE_RTE
# include<rte.h>
#endif
/* Maximum number of '-b' batch files that may be supplied on one command line */
#define MAX_BATCH_FILES 32
#define TL_RESOURCE_NAME_NONE "<none>"
/* getopt() option string shared by the argument parser */
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"
/* Output/behavior flags stored in perftest_context.flags */
enum {
	TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),
	TEST_FLAG_PRINT_TEST = UCS_BIT(1),
	TEST_FLAG_SET_AFFINITY = UCS_BIT(8),
	TEST_FLAG_NUMERIC_FMT = UCS_BIT(9),   /* thousands separators in numbers */
	TEST_FLAG_PRINT_FINAL = UCS_BIT(10),  /* suppress intermediate reports */
	TEST_FLAG_PRINT_CSV = UCS_BIT(11)
};
/* State for the built-in socket-based runtime (client/server rendezvous) */
typedef struct sock_rte_group {
	int is_server; /* nonzero on the listening side */
	int connfd;    /* connected TCP socket to the peer */
} sock_rte_group_t;
/* One entry in the tests[] registry: maps a '-t' name to its UCX command */
typedef struct test_type {
	const char *name;              /* value accepted by '-t' */
	ucx_perf_api_t api;            /* UCT (transport) or UCP (protocol) */
	ucx_perf_cmd_t command;
	ucx_perf_test_type_t test_type; /* ping-pong vs unidirectional stream */
	const char *desc;              /* human-readable description for usage() */
} test_type_t;
/* Aggregated command-line/runtime state passed between the helper functions */
struct perftest_context {
	ucx_perf_params_t params;
	const char *server_addr; /* NULL when running as the server */
	int port;
	int mpi;
	unsigned cpu;
	unsigned flags; /* TEST_FLAG_* bits */
	unsigned num_batch_files;
	char *batch_files[MAX_BATCH_FILES];
	char *test_names[MAX_BATCH_FILES];
	sock_rte_group_t sock_rte_group;
};
/* Registry of runnable tests, selected by name via '-t'; terminated by a
 * NULL-named sentinel entry (iterate with `for (t = tests; t->name; ++t)`). */
test_type_t tests[] = {
	{"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
	 "active message latency"},
	{"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
	 "put latency"},
	{"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
	 "atomic add latency"},
	{"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "get latency / bandwidth / message rate"},
	{"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic fetch-and-add latency / rate"},
	{"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic swap latency / rate"},
	{"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic compare-and-swap latency / rate"},
	{"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "active message bandwidth / message rate"},
	{"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "put bandwidth / message rate"},
	{"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic add message rate"},
	{"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
	 "tag match latency"},
	{"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "tag match bandwidth"},
	{"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
	 "tag sync match latency"},
	{"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "tag sync match bandwidth"},
	{"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
	 "put latency"},
	{"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "put bandwidth"},
	{"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "get latency / bandwidth / message rate"},
	{"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic add bandwidth / message rate"},
	{"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic fetch-and-add latency / bandwidth / rate"},
	{"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic swap latency / bandwidth / rate"},
	{"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "atomic compare-and-swap latency / bandwidth / rate"},
	{"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
	 "stream bandwidth"},
	{"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
	 "stream latency"},
	{NULL} /* sentinel */
};
/* Transfer exactly `size` bytes over `sock` using `sock_call` (send/recv),
 * polling with a 1ms timeout so the optional `progress` callback keeps the
 * communication context advancing while we wait. `name` labels error logs.
 * Returns 0 when all bytes were transferred, -1 on socket error or if the
 * peer closed the connection. */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
	size_t total = 0;
	struct pollfd pfd;
	int ret;
	while (total < size) {
		pfd.fd = sock;
		pfd.events = poll_events;
		pfd.revents = 0;
		ret = poll(&pfd, 1, 1); /* poll for 1ms */
		if (ret > 0) {
			ucs_assert(ret == 1);
			ucs_assert(pfd.revents & poll_events);
			ret = sock_call(sock, (char*)data + total, size - total, 0);
			if (ret < 0) {
				ucs_error("%s() failed: %m", name);
				return -1;
			}
			if (ret == 0) {
				/* recv() returns 0 on orderly peer shutdown; without this
				 * check POLLIN stays set and the loop would spin forever */
				ucs_error("%s(): connection closed", name);
				return -1;
			}
			total += ret;
		} else if ((ret < 0) && (errno != EINTR)) {
			ucs_error("poll(fd=%d) failed: %m", sock);
			return -1;
		}
		/* progress user context */
		if (progress != NULL) {
			progress(arg);
		}
	}
	return 0;
}
/* Blocking-send wrapper: push exactly `size` bytes via sock_io()/POLLOUT.
 * send() takes a const buffer, so its pointer is cast to the non-const
 * signature sock_io() expects. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
	ssize_t (*io_func)(int, void *, size_t, int);
	io_func = (ssize_t (*)(int, void *, size_t, int))send;
	return sock_io(sock, io_func, POLLOUT, data, size, progress, arg, "send");
}
/* Blocking-receive wrapper: pull exactly `size` bytes via sock_io()/POLLIN,
 * invoking `progress` while waiting for data to arrive. */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
	return sock_io(sock, recv, POLLIN, data, size,
	               progress, arg, "recv");
}
/* Print one measurement row (iterations, latencies in usec, bandwidths in
 * MB/s, message rates) in CSV, numeric (thousands separators) or plain
 * format. With TEST_FLAG_PRINT_FINAL set, intermediate rows are suppressed. */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final)
{
	static const char *fmt_csv = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
	static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
	static const char *fmt_plain = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
	const char *fmt;
	unsigned idx;
	/* nothing to do unless results were requested */
	if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
	    (!final && (flags & TEST_FLAG_PRINT_FINAL)))
	{
		return;
	}
	if (flags & TEST_FLAG_PRINT_CSV) {
		/* CSV rows are prefixed with the batch test names */
		for (idx = 0; idx < num_names; ++idx) {
			printf("%s,", test_names[idx]);
		}
		fmt = fmt_csv;
	} else {
		fmt = (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain;
	}
	printf(fmt,
	       (double)result->iters,
	       result->latency.typical * 1000000.0,
	       result->latency.moment_average * 1000000.0,
	       result->latency.total_average * 1000000.0,
	       result->bandwidth.moment_average / (1024.0 * 1024.0),
	       result->bandwidth.total_average / (1024.0 * 1024.0),
	       result->msgrate.moment_average,
	       result->msgrate.total_average);
	fflush(stdout);
}
/* Print the banner preceding a test run: the selected test's API/description/
 * data-layout box (with TEST_FLAG_PRINT_TEST), then either a CSV column
 * header or the ASCII results-table header depending on ctx->flags. */
static void print_header(struct perftest_context *ctx)
{
	const char *test_api_str;
	const char *test_data_str;
	test_type_t *test;
	unsigned i;
	if (ctx->flags & TEST_FLAG_PRINT_TEST) {
		/* look up the registry entry matching the selected command/type;
		 * the loop stops at the NULL-named sentinel if nothing matches */
		for (test = tests; test->name; ++test) {
			if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
				break;
			}
		}
		if (test->name != NULL) {
			if (test->api == UCX_PERF_API_UCT) {
				test_api_str = "transport layer";
				switch (ctx->params.uct.data_layout) {
				case UCT_PERF_DATA_LAYOUT_SHORT:
					test_data_str = "short";
					break;
				case UCT_PERF_DATA_LAYOUT_BCOPY:
					test_data_str = "bcopy";
					break;
				case UCT_PERF_DATA_LAYOUT_ZCOPY:
					test_data_str = "zcopy";
					break;
				default:
					test_data_str = "(undefined)";
					break;
				}
			} else if (test->api == UCX_PERF_API_UCP) {
				test_api_str = "protocol layer";
				test_data_str = "(automatic)"; /* TODO contig/stride/stream */
			} else {
				return; /* unknown API: print nothing */
			}
			printf("+------------------------------------------------------------------------------------------+\n");
			printf("| API:          %-60s |\n", test_api_str);
			printf("| Test:         %-60s |\n", test->desc);
			printf("| Data layout:  %-60s |\n", test_data_str);
			printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params));
		}
	}
	if (ctx->flags & TEST_FLAG_PRINT_CSV) {
		if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
			/* one leading column per batch file, then the metric columns */
			for (i = 0; i < ctx->num_batch_files; ++i) {
				printf("%s,", basename(ctx->batch_files[i]));
			}
			printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
		}
	} else {
		if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
			printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
			printf("|              |       latency (usec)        |   bandwidth (MB/s)  |  message rate (msg/s) |\n");
			printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
			printf("| # iterations | typical | average | overall |  average |  overall |  average  |  overall  |\n");
			printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
		} else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
			printf("+------------------------------------------------------------------------------------------+\n");
		}
	}
}
/* In non-CSV batch mode, print a table-separator row with the current batch
 * test names spliced in (e.g. "+--name1/name2----+..."), so each batch line
 * in the output is labeled. No-op without batch files or in CSV mode. */
static void print_test_name(struct perftest_context *ctx)
{
	char buf[200];
	unsigned i, pos;
	if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
		strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");
		pos = 1;
		for (i = 0; i < ctx->num_batch_files; ++i) {
			if ((i != 0) && (pos < sizeof(buf) - 1)) {
				buf[pos++] = '/';
			}
			/* bound both the copy and the cursor: the original advanced pos
			 * by the full name length even when the copy was truncated, so
			 * `sizeof(buf) - pos - 1` could underflow on very long names */
			size_t avail = (pos < sizeof(buf) - 1) ? (sizeof(buf) - 1 - pos) : 0;
			size_t len = ucs_min(strlen(ctx->test_names[i]), avail);
			memcpy(&buf[pos], ctx->test_names[i], len);
			pos += len;
		}
		if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
			printf("%s\n", buf);
		}
	}
}
/* Print the command-line help text for ucx_perftest.
 * Under MPI (-P 1), only rank 0 prints; other ranks return silently. */
static void usage(const struct perftest_context *ctx, const char *program)
{
    /* Human-readable API names indexed by ucx_perf_api_t. */
    static const char* api_names[] = {
        [UCX_PERF_API_UCT] = "UCT",
        [UCX_PERF_API_UCP] = "UCP"
    };
    test_type_t *test;
    int UCS_V_UNUSED rank;   /* only read when HAVE_MPI */

#if HAVE_MPI
    /* Suppress duplicate help output from non-root ranks. */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ctx->mpi && (rank != 0)) {
        return;
    }
#endif

#if HAVE_MPI
    printf(" Note: test can be also launched as an MPI application\n");
    printf("\n");
#elif HAVE_RTE
    printf(" Note: this test can be also launched as an libRTE application\n");
    printf("\n");
#endif
    printf(" Usage: %s [ server-hostname ] [ options ]\n", program);
    printf("\n");
    printf(" Common options:\n");
    printf(" -t <test> test to run:\n");
    /* List every entry of the global test table with its API and blurb. */
    for (test = tests; test->name; ++test) {
        printf(" %13s - %s %s\n", test->name,
               api_names[test->api], test->desc);
    }
    printf("\n");
    /* Defaults shown in parentheses come straight from ctx->params
     * (populated by init_test_params / earlier options). */
    printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n",
           ctx->params.msg_size_list[0]);
    printf(" for example: \"-s 16,48,8192,8192,14\"\n");
    printf(" -n <iters> number of iterations to run (%ld)\n", ctx->params.max_iter);
    printf(" -w <iters> number of warm-up iterations (%zu)\n",
           ctx->params.warmup_iter);
    printf(" -c <cpu> set affinity to this CPU (off)\n");
    printf(" -O <count> maximal number of uncompleted outstanding sends (%u)\n",
           ctx->params.max_outstanding);
    printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n",
           ctx->params.iov_stride);
    printf(" -T <threads> number of threads in the test (%d), if >1 implies \"-M multi\"\n",
           ctx->params.thread_count);
    printf(" -B register memory with NONBLOCK flag\n");
    printf(" -b <file> read and execute tests from a batch file: every line in the\n");
    printf(" file is a test to run, first word is test name, the rest of\n");
    printf(" the line is command-line arguments for the test.\n");
    printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
    printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
    printf(" -h show this help message\n");
    printf("\n");
    printf(" Output format:\n");
    printf(" -N use numeric formatting (thousands separator)\n");
    printf(" -f print only final numbers\n");
    printf(" -v print CSV-formatted output\n");
    printf("\n");
    printf(" UCT only:\n");
    printf(" -d <device> device to use for testing\n");
    printf(" -x <tl> transport to use for testing\n");
    printf(" -D <layout> data layout for sender side:\n");
    printf(" short - short messages (default, cannot be used for get)\n");
    printf(" bcopy - copy-out (cannot be used for atomics)\n");
    printf(" zcopy - zero-copy (cannot be used for atomics)\n");
    printf(" iov - scatter-gather list (iovec)\n");
    printf(" -W <count> flow control window size, for active messages (%u)\n",
           ctx->params.uct.fc_window);
    printf(" -H <size> active message header size (%zu)\n",
           ctx->params.am_hdr_size);
    printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n");
    printf(" thread_spinlock - separate progress thread with spin locking\n");
    printf(" thread_mutex - separate progress thread with mutex locking\n");
    printf(" signal - signal-based timer\n");
    printf("\n");
    printf(" UCP only:\n");
    printf(" -M <thread> thread support level for progress engine (single)\n");
    printf(" single - only the master thread can access\n");
    printf(" serialized - one thread can access at a time\n");
    printf(" multi - multiple threads can access\n");
    printf(" -D <layout>[,<layout>]\n");
    printf(" data layout for sender and receiver side (contig)\n");
    printf(" contig - Continuous datatype\n");
    printf(" iov - Scatter-gather list\n");
    printf(" -C use wild-card tag for tag tests\n");
    printf(" -U force unexpected flow by using tag probe\n");
    printf(" -r <mode> receive mode for stream tests (recv)\n");
    printf(" recv : Use ucp_stream_recv_nb\n");
    printf(" recv_data : Use ucp_stream_recv_data_nb\n");
    printf(" -m <mem type> memory type of messages\n");
    printf(" host - system memory(default)\n");
    /* GPU memory options are advertised only when the allocator is present. */
    if (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA] != NULL) {
        printf(" cuda - NVIDIA GPU memory\n");
    }
    if (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA_MANAGED] != NULL) {
        printf(" cuda-managed - NVIDIA cuda managed/unified memory\n");
    }
    printf("\n");
    printf(" NOTE: When running UCP tests, transport and device should be specified by\n");
    printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
    printf("\n");
}
/* Map the leading token of 'optarg' onto a UCP datatype.
 * Prefix matching is intentional: the -D argument may be a comma-separated
 * pair such as "iov,contig", and each half is parsed separately. */
static ucs_status_t parse_ucp_datatype_params(const char *optarg,
                                              ucp_perf_datatype_t *datatype)
{
    if (0 == strncmp(optarg, "iov", strlen("iov"))) {
        *datatype = UCP_PERF_DATATYPE_IOV;
        return UCS_OK;
    }

    if (0 == strncmp(optarg, "contig", strlen("contig"))) {
        *datatype = UCP_PERF_DATATYPE_CONTIG;
        return UCS_OK;
    }

    return UCS_ERR_INVALID_PARAM;
}
/* Parse the -s argument: a comma-separated list of message sizes.
 * Grows params->msg_size_list in place and sets params->msg_size_cnt.
 * On a malformed token the list is freed and NULLed, and
 * UCS_ERR_INVALID_PARAM is returned. */
static ucs_status_t parse_message_sizes_params(const char *optarg,
                                               ucx_perf_params_t *params)
{
    size_t *sizes;
    size_t count, idx;
    const char *scan;
    char *pos, *end;

    /* One size per comma-separated token: count = #commas + 1. */
    count = 1;
    for (scan = optarg; (scan = strchr(scan, ',')) != NULL; ++scan) {
        ++count;
    }

    sizes = realloc(params->msg_size_list,
                    sizeof(*params->msg_size_list) * count);
    if (sizes == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    params->msg_size_list = sizes;

    errno = 0;
    pos   = (char *)optarg;
    for (idx = 0; idx < count; ++idx) {
        params->msg_size_list[idx] = strtoul(pos, &end, 10);
        /* Reject range errors, conversion failures, and empty tokens. */
        if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[idx])) ||
            ((errno != 0) && (params->msg_size_list[idx] == 0)) ||
            (pos == end)) {
            free(params->msg_size_list);
            params->msg_size_list = NULL; /* prevent double free */
            ucs_error("Invalid option substring argument at position %lu", idx);
            return UCS_ERR_INVALID_PARAM;
        }
        pos = end + 1;  /* skip the delimiter */
    }

    params->msg_size_cnt = count;
    return UCS_OK;
}
/* Fill 'params' with the default settings for a perftest run and allocate
 * the initial one-entry message-size list (a single 8-byte message).
 * Returns UCS_ERR_NO_MEMORY if the list allocation fails. */
static ucs_status_t init_test_params(ucx_perf_params_t *params)
{
    memset(params, 0, sizeof(*params));

    /* Selection fields start at their "unset" sentinels. */
    params->api       = UCX_PERF_API_LAST;
    params->command   = UCX_PERF_CMD_LAST;
    params->test_type = UCX_PERF_TEST_TYPE_LAST;
    params->wait_mode = UCX_PERF_WAIT_MODE_LAST;

    /* Threading defaults: one thread, single-threaded progress. */
    params->thread_mode  = UCS_THREAD_MODE_SINGLE;
    params->thread_count = 1;
    params->async_mode   = UCS_ASYNC_THREAD_LOCK_TYPE;

    /* Iteration and reporting defaults. */
    params->max_outstanding = 1;
    params->warmup_iter     = 10000;
    params->max_iter        = 1000000l;
    params->max_time        = 0.0;
    params->report_interval = 1.0;
    params->flags           = UCX_PERF_TEST_FLAG_VERBOSE;

    /* Data layout and memory defaults. */
    params->am_hdr_size       = 8;
    params->alignment         = ucs_get_page_size();
    params->uct.fc_window     = UCT_PERF_TEST_MAX_FC_WINDOW;
    params->uct.data_layout   = UCT_PERF_DATA_LAYOUT_SHORT;
    params->mem_type          = UCS_MEMORY_TYPE_HOST;
    params->iov_stride        = 0;
    params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;

    /* No device/transport selected yet (UCT picks via -d/-x). */
    strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE);
    strcpy(params->uct.tl_name, TL_RESOURCE_NAME_NONE);

    /* Single default message size of 8 bytes. */
    params->msg_size_cnt  = 1;
    params->msg_size_list = calloc(params->msg_size_cnt,
                                   sizeof(*params->msg_size_list));
    if (params->msg_size_list == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    params->msg_size_list[0] = 8;

    return UCS_OK;
}
/* Apply one test-parameter option (an option letter from TEST_PARAMS_ARGS)
 * to 'params'. Shared by the command-line parser and batch-file parsing.
 * Returns UCS_OK on success, UCS_ERR_INVALID_PARAM on a bad argument.
 * Fix: the '&' in "&params" had been corrupted to the mojibake "¶ms"
 * in the -D branch, which does not compile; restored. */
static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg)
{
    test_type_t *test;
    char *optarg2 = NULL;

    switch (opt) {
    case 'd':
        ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name),
                          "%s", optarg);
        return UCS_OK;
    case 'x':
        ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name),
                          "%s", optarg);
        return UCS_OK;
    case 't':
        /* Look the test up by name in the global table. */
        for (test = tests; test->name; ++test) {
            if (!strcmp(optarg, test->name)) {
                params->api       = test->api;
                params->command   = test->command;
                params->test_type = test->test_type;
                break;
            }
        }
        if (test->name == NULL) {
            ucs_error("Invalid option argument for -t");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'D':
        if (!strcmp(optarg, "short")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
        } else if (!strcmp(optarg, "bcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY;
        } else if (!strcmp(optarg, "zcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY;
        } else if (UCS_OK == parse_ucp_datatype_params(optarg,
                                                       &params->ucp.send_datatype)) {
            /* UCP form "<send>[,<recv>]": parse optional receive layout. */
            optarg2 = strchr(optarg, ',');
            if (optarg2) {
                if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1,
                                                        &params->ucp.recv_datatype)) {
                    return UCS_ERR_INVALID_PARAM;
                }
            }
        } else {
            ucs_error("Invalid option argument for -D");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'i':
        params->iov_stride = atol(optarg);
        return UCS_OK;
    case 'n':
        params->max_iter = atol(optarg);
        return UCS_OK;
    case 's':
        return parse_message_sizes_params(optarg, params);
    case 'H':
        params->am_hdr_size = atol(optarg);
        return UCS_OK;
    case 'W':
        params->uct.fc_window = atoi(optarg);
        return UCS_OK;
    case 'O':
        params->max_outstanding = atoi(optarg);
        return UCS_OK;
    case 'w':
        params->warmup_iter = atol(optarg);
        return UCS_OK;
    case 'o':
        params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED;
        return UCS_OK;
    case 'B':
        params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK;
        return UCS_OK;
    case 'q':
        params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE;
        return UCS_OK;
    case 'C':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD;
        return UCS_OK;
    case 'U':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE;
        return UCS_OK;
    case 'M':
        if (!strcmp(optarg, "single")) {
            params->thread_mode = UCS_THREAD_MODE_SINGLE;
            return UCS_OK;
        } else if (!strcmp(optarg, "serialized")) {
            params->thread_mode = UCS_THREAD_MODE_SERIALIZED;
            return UCS_OK;
        } else if (!strcmp(optarg, "multi")) {
            params->thread_mode = UCS_THREAD_MODE_MULTI;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -M");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'T':
        /* More than one thread implies multi-threaded progress. */
        params->thread_count = atoi(optarg);
        params->thread_mode  = UCS_THREAD_MODE_MULTI;
        return UCS_OK;
    case 'A':
        /* "thread" is accepted as a legacy alias of "thread_spinlock". */
        if (!strcmp(optarg, "thread") || !strcmp(optarg, "thread_spinlock")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK;
            return UCS_OK;
        } else if (!strcmp(optarg, "thread_mutex")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_MUTEX;
            return UCS_OK;
        } else if (!strcmp(optarg, "signal")) {
            params->async_mode = UCS_ASYNC_MODE_SIGNAL;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -A");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'r':
        if (!strcmp(optarg, "recv_data")) {
            params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        } else if (!strcmp(optarg, "recv")) {
            params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        }
        return UCS_ERR_INVALID_PARAM;
    case 'm':
        /* GPU memory types are accepted only when an allocator is present. */
        if (!strcmp(optarg, "host")) {
            params->mem_type = UCS_MEMORY_TYPE_HOST;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda") &&
                   (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA] != NULL)) {
            params->mem_type = UCS_MEMORY_TYPE_CUDA;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda-managed") &&
                   (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA_MANAGED] != NULL)) {
            params->mem_type = UCS_MEMORY_TYPE_CUDA_MANAGED;
            return UCS_OK;
        }
        ucs_error("Unsupported memory type: \"%s\"", optarg);
        return UCS_ERR_INVALID_PARAM;
    default:
        return UCS_ERR_INVALID_PARAM;
    }
}
/* Read the next non-empty, non-comment line from a batch file, tokenize it,
 * parse the option tokens into 'params', and return the test name (first
 * token) via *test_name_p as a freshly strdup'ed string the caller frees.
 * Returns UCS_ERR_NO_ELEM at end of file.
 * Fix: the strdup() result was previously unchecked; a failure now returns
 * UCS_ERR_NO_MEMORY instead of handing the caller a NULL test name. */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, ucx_perf_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM;   /* end of batch file */
        }
        ++(*line_num);

        /* Split the line into whitespace-separated tokens (at most
         * MAX_SIZE of them; extras are ignored). */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#'));   /* skip blanks/comments */

    /* Parse the tokens after the test name as test options. */
    optind = 1;
    while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("in batch file '%s' line %d: -%c %s: %s",
                      file_name, *line_num, c, optarg, ucs_status_string(status));
            return status;
        }
    }

    *test_name_p = strdup(argv[0]);
    if (*test_name_p == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    return UCS_OK;
}
/* Parse the whole perftest command line into 'ctx'.
 * Initializes ctx->params to defaults first, then applies perftest-level
 * options here and delegates test-level options to parse_test_params().
 * A trailing non-option argument is taken as the server hostname.
 * Returns UCS_ERR_CANCELED after printing the help text for -h. */
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
                               int argc, char **argv)
{
    ucs_status_t status;
    int c;

    ucs_trace_func("");

    ucx_perf_global_init(); /* initialize memory types */

    status = init_test_params(&ctx->params);
    if (status != UCS_OK) {
        return status;
    }

    /* Perftest-level defaults (test-level defaults set above). */
    ctx->server_addr     = NULL;
    ctx->num_batch_files = 0;
    ctx->port            = 13337;
    ctx->flags           = 0;
    ctx->mpi             = mpi_initialized;

    optind = 1;
    while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
        switch (c) {
        case 'p':
            ctx->port = atoi(optarg);
            break;
        case 'b':
            /* Batch files beyond MAX_BATCH_FILES are silently dropped. */
            if (ctx->num_batch_files < MAX_BATCH_FILES) {
                ctx->batch_files[ctx->num_batch_files++] = optarg;
            }
            break;
        case 'N':
            ctx->flags |= TEST_FLAG_NUMERIC_FMT;
            break;
        case 'f':
            ctx->flags |= TEST_FLAG_PRINT_FINAL;
            break;
        case 'v':
            ctx->flags |= TEST_FLAG_PRINT_CSV;
            break;
        case 'c':
            ctx->flags |= TEST_FLAG_SET_AFFINITY;
            ctx->cpu = atoi(optarg);
            break;
        case 'P':
#if HAVE_MPI
            /* -P can only enable MPI mode if MPI was actually initialized. */
            ctx->mpi = atoi(optarg) && mpi_initialized;
            break;
#endif
            /* NOTE(review): without MPI support, -P falls through to -h
             * (prints usage and cancels) - looks intentional, but confirm. */
        case 'h':
            usage(ctx, ucs_basename(argv[0]));
            return UCS_ERR_CANCELED;
        default:
            /* Anything else is a test-level option. */
            status = parse_test_params(&ctx->params, c, optarg);
            if (status != UCS_OK) {
                usage(ctx, ucs_basename(argv[0]));
                return status;
            }
            break;
        }
    }

    /* First positional argument, if any, is the server to connect to. */
    if (optind < argc) {
        ctx->server_addr = argv[optind];
    }

    return UCS_OK;
}
/* The socket RTE always connects exactly one server and one client,
 * so the group size is a constant. */
static unsigned sock_rte_group_size(void *rte_group)
{
    (void)rte_group;  /* unused: size is fixed */
    return 2;
}
/* Rank within the two-member socket group: server is 0, client is 1. */
static unsigned sock_rte_group_index(void *rte_group)
{
    sock_rte_group_t *group = rte_group;

    if (group->is_server) {
        return 0;
    }
    return 1;
}
/* Synchronize server and client over the control socket.
 * Only the OpenMP master thread performs the socket exchange; the
 * surrounding "omp barrier" pragmas fence all other threads around it. */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp barrier

#pragma omp master
    {
        sock_rte_group_t *group = rte_group;
        const unsigned magic = 0xdeadbeef;
        unsigned sync;

        /* Exchange a magic word in both directions; 'progress' keeps the
         * transport advancing while the socket calls wait. */
        sync = magic;
        safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);

        sync = 0;
        safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);

        ucs_assert(sync == magic);
    }
#pragma omp barrier
}
/* Eagerly send every iovec fragment to the peer over the control socket,
 * preceded by the total payload size so the receiver can sanity-check.
 * 'req' is unused: there is nothing to complete later. */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t total = 0;
    int idx;

    for (idx = 0; idx < iovcnt; ++idx) {
        total += iovec[idx].iov_len;
    }
    safe_send(group->connfd, &total, sizeof(total), NULL, NULL);

    for (idx = 0; idx < iovcnt; ++idx) {
        safe_send(group->connfd, iovec[idx].iov_base, iovec[idx].iov_len, NULL,
                  NULL);
    }
}
/* Receive a size-prefixed blob from the peer into 'buffer' (at most 'max'
 * bytes). Receiving "from ourselves" is a no-op; with only two ranks, the
 * source must otherwise be the opposite rank. */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    int self;
    size_t incoming;

    self = sock_rte_group_index(rte_group);
    if (src == self) {
        return;
    }
    ucs_assert_always(src == (1 - self));

    safe_recv(group->connfd, &incoming, sizeof(incoming), NULL, NULL);
    ucs_assert_always(incoming <= max);
    safe_recv(group->connfd, buffer, incoming, NULL, NULL);
}
/* Report callback for socket runs: forward the measurement to the shared
 * progress printer using the context stored as 'arg'. */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, int is_final)
{
    struct perftest_context *test_ctx = arg;

    print_progress(test_ctx->test_names, test_ctx->num_batch_files, result,
                   test_ctx->flags, is_final);
}
/* RTE callback table for socket-based (TCP out-of-band) runs.
 * exchange_vec is a no-op because sock_rte_post_vec sends data eagerly. */
static ucx_perf_rte_t sock_rte = {
    .group_size   = sock_rte_group_size,
    .group_index  = sock_rte_group_index,
    .barrier      = sock_rte_barrier,
    .post_vec     = sock_rte_post_vec,
    .recv         = sock_rte_recv,
    .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function,
    .report       = sock_rte_report,
};
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
struct sockaddr_in inaddr;
struct hostent *he;
ucs_status_t status;
int optval = 1;
int sockfd, connfd;
int ret;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
ucs_error("socket() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err;
}
if (ctx->server_addr == NULL) {
optval = 1;
status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
&optval, sizeof(optval));
if (status != UCS_OK) {
goto err_close_sockfd;
}
inaddr.sin_family = AF_INET;
inaddr.sin_port = htons(ctx->port);
inaddr.sin_addr.s_addr = INADDR_ANY;
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("bind() failed: %m");
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
ret = listen(sockfd, 10);
if (ret < 0) {
ucs_error("listen() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
printf("Waiting for connection...\n");
/* Accept next connection */
connfd = accept(sockfd, NULL, NULL);
if (connfd < 0) {
ucs_error("accept() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
close(sockfd);
ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
if (ctx->params.msg_size_cnt) {
ctx->params.msg_size_list = calloc(ctx->params.msg_size_cnt,
sizeof(*ctx->params.msg_size_list));
if (NULL == ctx->params.msg_size_list) {
status = UCS_ERR_NO_MEMORY;
goto err_close_connfd;
}
ret = safe_recv(connfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) *
ctx->params.msg_size_cnt,
NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
}
ctx->sock_rte_group.connfd = connfd;
ctx->sock_rte_group.is_server = 1;
} else {
he = gethostbyname(ctx->server_addr);
if (he == NULL || he->h_addr_list == NULL) {
ucs_error("host %s not found: %s", ctx->server_addr,
hstrerror(h_errno));
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
inaddr.sin_family = he->h_addrtype;
inaddr.sin_port = htons(ctx->port);
ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("connect() failed: %m");
status = UCS_ERR_UNREACHABLE;
goto err_close_sockfd;
}
safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ctx->params.msg_size_cnt) {
safe_send(sockfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
NULL, NULL);
}
ctx->sock_rte_group.connfd = sockfd;
ctx->sock_rte_group.is_server = 0;
}
if (ctx->sock_rte_group.is_server) {
ctx->flags |= TEST_FLAG_PRINT_TEST;
} else {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.rte_group = &ctx->sock_rte_group;
ctx->params.rte = &sock_rte;
ctx->params.report_arg = ctx;
return UCS_OK;
err_close_connfd:
close(connfd);
goto err;
err_close_sockfd:
close(sockfd);
err:
return status;
}
/* Close the data socket; both server and client keep it in connfd. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    int fd = ctx->sock_rte_group.connfd;

    close(fd);
    return UCS_OK;
}
#if HAVE_MPI
/* The perf group size under MPI is the size of MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int nranks;

    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    return nranks;
}
/* Our index in the perf group is simply our MPI rank. */
static unsigned mpi_rte_group_index(void *rte_group)
{
    int my_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    return my_rank;
}
/* Group barrier over MPI point-to-point messages, driving the user
 * 'progress' callback while waiting.
 * Fix: "#pragma omp master" governs only the single following statement;
 * without braces, only MPI_Comm_rank() ran on the master thread and every
 * other OpenMP thread executed the sends/receives as well. The whole
 * barrier body is now braced, matching sock_rte_barrier/ext_rte_barrier. */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
    int group_size, my_rank, i;
    MPI_Request *reqs;
    int nreqs = 0;
    int dummy;
    int flag;

#pragma omp barrier

#pragma omp master
    {
        /*
         * Naive non-blocking barrier implementation over send/recv, to call user
         * progress while waiting for completion.
         * Not using MPI_Ibarrier to be compatible with MPI-1.
         */
        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &group_size);

        /* allocate maximal possible number of requests */
        reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

        if (my_rank == 0) {
            /* root gathers "ping" from all other ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Irecv(&dummy, 0, MPI_INT,
                          i /* source */,
                          1 /* tag */,
                          MPI_COMM_WORLD,
                          &reqs[nreqs++]);
            }
        } else {
            /* every non-root rank sends "ping" and waits for "pong" */
            MPI_Send(&dummy, 0, MPI_INT,
                     0 /* dest */,
                     1 /* tag */,
                     MPI_COMM_WORLD);
            MPI_Irecv(&dummy, 0, MPI_INT,
                      0 /* source */,
                      2 /* tag */,
                      MPI_COMM_WORLD,
                      &reqs[nreqs++]);
        }

        /* Waiting for receive requests */
        do {
            MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            progress(arg);
        } while (!flag);

        if (my_rank == 0) {
            /* root sends "pong" to all ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Send(&dummy, 0, MPI_INT,
                         i /* dest */,
                         2 /* tag */,
                         MPI_COMM_WORLD);
            }
        }
    }
#pragma omp barrier
}
/* Eagerly broadcast all iovec fragments to every other rank.
 * The last fragment is sent with tag 1 so the receiver knows when to stop
 * (see mpi_rte_recv); all earlier fragments carry tag 0.
 * NOTE(review): iov_len (size_t) narrows to MPI's int count - assumes
 * fragments smaller than INT_MAX bytes; confirm for large messages. */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int nranks, self;
    int peer, idx;

    MPI_Comm_rank(MPI_COMM_WORLD, &self);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    for (peer = 0; peer < nranks; ++peer) {
        if (peer == self) {
            continue;
        }
        for (idx = 0; idx < iovcnt; ++idx) {
            MPI_Send(iovec[idx].iov_base, iovec[idx].iov_len, MPI_BYTE, peer,
                     idx == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    *req = (void*)(uintptr_t)1;   /* non-NULL dummy completion handle */
}
/* Receive a multi-fragment message of up to 'max' bytes from rank 'src'.
 * Fragments arrive in order; the one tagged 1 is the last (see
 * mpi_rte_post_vec). A self-receive is a no-op.
 * Fix: arithmetic on a void* is a GNU extension, not standard C - the
 * buffer is now advanced through a char* cast. */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
                         void *req)
{
    MPI_Status status;
    size_t offset;
    int my_rank;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (src == my_rank) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max);
        MPI_Recv((char*)buffer + offset, max - offset, MPI_BYTE, src,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &count);
        offset += count;
    } while (status.MPI_TAG != 1);
}
/* Report callback for MPI runs: delegate to the shared progress printer. */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *test_ctx = arg;

    print_progress(test_ctx->test_names, test_ctx->num_batch_files, result,
                   test_ctx->flags, is_final);
}
/* RTE callback table for MPI-launched runs.
 * exchange_vec is a no-op: mpi_rte_post_vec sends its data eagerly.
 * NOTE(review): casting ucs_empty_function through void* assumes function
 * pointers round-trip through void* on this platform - confirm. */
static ucx_perf_rte_t mpi_rte = {
    .group_size   = mpi_rte_group_size,
    .group_index  = mpi_rte_group_index,
    .barrier      = mpi_rte_barrier,
    .post_vec     = mpi_rte_post_vec,
    .recv         = mpi_rte_recv,
    .exchange_vec = (void*)ucs_empty_function,
    .report       = mpi_rte_report,
};
#elif HAVE_RTE
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
/* Group-wide barrier via libRTE, executed on the OpenMP master thread only;
 * the surrounding "omp barrier" pragmas fence the other threads.
 * Unlike the socket variant, 'progress' is not invoked while waiting. */
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp barrier

#pragma omp master
    {
        rte_group_t group = (rte_group_t)rte_group;
        int rc;

        rc = rte_barrier(group);
        if (RTE_SUCCESS != rc) {
            ucs_error("Failed to rte_barrier");
        }
    }
#pragma omp barrier
}
/* Stage the iovec contents in a new SRS session under key "KEY_PERF"; the
 * framework later publishes it via ext_rte_exchange_vec() and consumes it
 * via ext_rte_recv(). *req receives the session handle.
 * Fix: when the temporary vector allocation failed, the function returned
 * with *req uninitialized (and silently); the session handle is now always
 * stored and the failure is logged. */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;

    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_create");
    }

    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        ucs_error("Failed to allocate iovec copy of %d entries", iovcnt);
        *req = session;   /* keep the handle valid for the caller's flow */
        return;
    }

    /* Translate struct iovec entries into RTE's byte-typed iovec. */
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type     = rte_datatype_uint8_t;
        r_vec[i].count    = iovec[i].iov_len;
    }

    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }

    *req = session;
    free(r_vec);
}
/* Fetch the blob published by rank 'src' under "KEY_PERF" and unpack up to
 * 'max' bytes of it into 'buffer'. 'req' is the SRS session created by
 * ext_rte_post_vec; it is destroyed here once the data is consumed. */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group         = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;

    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return;
    }

    /* Unpack the received bytes as a single uint8 vector of length 'max'. */
    r_vec.iov_base = buffer;
    r_vec.type     = rte_datatype_uint8_t;
    r_vec.count    = max;

    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);

    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    /* presumably rte_srs_get_data transfers buffer ownership to the caller
     * - TODO confirm against the libRTE API. */
    free(rte_buffer);
}
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
rte_srs_session_t session = (rte_srs_session_t)req;
int rc;
rc = rte_srs_exchange_data(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_exchange_data");
}
}
/* Report callback for libRTE runs: delegate to the shared printer. */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *test_ctx = arg;

    print_progress(test_ctx->test_names, test_ctx->num_batch_files, result,
                   test_ctx->flags, is_final);
}
/* RTE callback table for libRTE-launched runs. Unlike the socket and MPI
 * variants, data is staged in post_vec and published in exchange_vec. */
static ucx_perf_rte_t ext_rte = {
    .group_size   = ext_rte_group_size,
    .group_index  = ext_rte_group_index,
    .barrier      = ext_rte_barrier,
    .report       = ext_rte_report,
    .post_vec     = ext_rte_post_vec,
    .recv         = ext_rte_recv,
    .exchange_vec = ext_rte_exchange_vec,
};
#endif
/* Configure ctx to use the MPI (or, failing that, libRTE) runtime.
 * Under MPI exactly 2 ranks are required; rank 1 prints the results.
 * With neither runtime compiled in, this is a no-op returning UCS_OK. */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
    ucs_trace_func("");

#if HAVE_MPI
    int size, rank;

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2) {
        ucs_error("This test should run with exactly 2 processes (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    /* MPI needs no group object: ranks come from MPI_COMM_WORLD. */
    ctx->params.rte_group  = NULL;
    ctx->params.rte        = &mpi_rte;
    ctx->params.report_arg = ctx;
#elif HAVE_RTE
    rte_group_t group;

    rte_init(NULL, NULL, &group);
    if (1 == rte_group_rank(group)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group  = group;
    ctx->params.rte        = &ext_rte;
    ctx->params.report_arg = ctx;
#endif
    return UCS_OK;
}
/* Tear down the external RTE; MPI itself is finalized in main(). */
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#if HAVE_RTE
    rte_finalize();
#endif
    return UCS_OK;
}
/* Validate and apply CPU affinity for the benchmark.
 * With -c, pin the process to the requested CPU; otherwise warn when the
 * current affinity mask spans more than 2 CPUs, since an unpinned run can
 * distort latency numbers.
 * Fix: corrected the "ot of range" typo in the error message. */
static ucs_status_t check_system(struct perftest_context *ctx)
{
    cpu_set_t cpuset;
    unsigned i, count, nr_cpus;
    int ret;

    ucs_trace_func("");

    ret = sysconf(_SC_NPROCESSORS_CONF);
    if (ret < 0) {
        ucs_error("failed to get local cpu count: %m");
        return UCS_ERR_INVALID_PARAM;
    }
    nr_cpus = ret;

    memset(&cpuset, 0, sizeof(cpuset));
    if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
        if (ctx->cpu >= nr_cpus) {
            ucs_error("cpu (%u) out of range (0..%u)", ctx->cpu, nr_cpus - 1);
            return UCS_ERR_INVALID_PARAM;
        }
        CPU_SET(ctx->cpu, &cpuset);

        ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_setaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }
    } else {
        ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_getaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }

        /* Count CPUs in the current affinity mask. */
        count = 0;
        for (i = 0; i < CPU_SETSIZE; ++i) {
            if (CPU_ISSET(i, &cpuset)) {
                ++count;
            }
        }
        if (count > 2) {
            ucs_warn("CPU affinity is not set (bound to %u cpus)."
                     " Performance may be impacted.", count);
        }
    }

    return UCS_OK;
}
/* Deep-copy 'src' into 'dest': the struct is copied by value and the
 * message-size list gets its own allocation so the copies can be freed
 * independently. */
static ucs_status_t clone_params(ucx_perf_params_t *dest,
                                 const ucx_perf_params_t *src)
{
    size_t list_bytes;

    *dest = *src;
    list_bytes = dest->msg_size_cnt * sizeof(*dest->msg_size_list);

    dest->msg_size_list = malloc(list_bytes);
    if (dest->msg_size_list != NULL) {
        memcpy(dest->msg_size_list, src->msg_size_list, list_bytes);
        return UCS_OK;
    }

    /* malloc(0) may legitimately return NULL: only a failure if the
     * requested size was non-zero. */
    return (list_bytes != 0) ? UCS_ERR_NO_MEMORY : UCS_OK;
}
/* Run the test described by 'parent_params', recursing one level per batch
 * file: each line of batch file 'depth' is parsed on top of a fresh clone
 * of the parent parameters and run at depth+1. At the bottom (no more
 * batch files) the actual benchmark executes.
 * Fix: the '&' in "&params" had been corrupted to the mojibake "¶ms"
 * in four places, which does not compile; restored. */
static ucs_status_t run_test_recurs(struct perftest_context *ctx,
                                    ucx_perf_params_t *parent_params,
                                    unsigned depth)
{
    ucx_perf_params_t params;
    ucx_perf_result_t result;
    ucs_status_t status;
    FILE *batch_file;
    int line_num;

    ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files);

    if (parent_params->api == UCX_PERF_API_UCP) {
        /* UCP selects transport/device via environment, not -d/-x. */
        if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.dev_name);
        }
        if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.tl_name);
        }
    }

    if (depth >= ctx->num_batch_files) {
        /* Bottom of the recursion: run the benchmark itself. */
        print_test_name(ctx);
        return ucx_perf_run(parent_params, &result);
    }

    batch_file = fopen(ctx->batch_files[depth], "r");
    if (batch_file == NULL) {
        ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]);
        return UCS_ERR_IO_ERROR;
    }

    status = clone_params(&params, parent_params);
    if (status != UCS_OK) {
        goto out;
    }

    line_num = 0;
    while ((status = read_batch_file(batch_file, ctx->batch_files[depth],
                                     &line_num, &params,
                                     &ctx->test_names[depth])) == UCS_OK) {
        /* A failing line does not abort the batch; its status is ignored. */
        run_test_recurs(ctx, &params, depth + 1);
        free(params.msg_size_list);
        free(ctx->test_names[depth]);
        ctx->test_names[depth] = NULL;

        /* Re-clone so each batch line starts from the parent's settings. */
        status = clone_params(&params, parent_params);
        if (status != UCS_OK) {
            goto out;
        }
    }

    /* UCS_ERR_NO_ELEM means clean end-of-file. */
    if (status == UCS_ERR_NO_ELEM) {
        status = UCS_OK;
    }
    free(params.msg_size_list);

out:
    fclose(batch_file);
    return status;
}
/* Top-level test driver: print the header and kick off the (possibly
 * batch-recursive) run with the command-line parameters. */
static ucs_status_t run_test(struct perftest_context *ctx)
{
    ucs_status_t status;

    ucs_trace_func("");

    /* Locale with thousands separators, used by -N numeric output. */
    setlocale(LC_ALL, "en_US");

    print_header(ctx);

    status = run_test_recurs(ctx, &ctx->params, 0);
    if (status != UCS_OK) {
        ucs_error("Failed to run test: %s", ucs_status_string(status));
    }

    return status;
}
/* Entry point: parse options, pick the runtime environment (MPI/libRTE vs
 * plain sockets), run the test, and clean up.
 * Fixes: the local flag 'mpi_rte' shadowed the global mpi_rte RTE table
 * (renamed to use_mpi_rte); ctx is now zero-initialized so cleanup paths
 * never see indeterminate fields; dropped the redundant NULL guard around
 * free() (free(NULL) is a no-op). */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int use_mpi_rte;
    int ret;

    memset(&ctx, 0, sizeof(ctx));

#if HAVE_MPI
    /* Skip MPI when launched interactively from a terminal.
     * MPI_SUCCESS is defined as 0, hence the == 0 comparison. */
    mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
    mpi_initialized = 0;
#endif

    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        goto out;
    }

#ifdef __COVERITY__
    /* coverity[dont_call] */
    use_mpi_rte = rand(); /* Shut up deadcode error */
#endif

    /* libRTE builds always use the external RTE; otherwise it depends on
     * whether MPI mode is active. */
    if (ctx.mpi) {
        use_mpi_rte = 1;
    } else {
#if HAVE_RTE
        use_mpi_rte = 1;
#else
        use_mpi_rte = 0;
#endif
    }

    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Create RTE */
    status = (use_mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }

    ret = 0;

out_cleanup_rte:
    (use_mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
    free(ctx.params.msg_size_list);   /* free(NULL) is a no-op */
    if (mpi_initialized) {
#if HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
|
GB_binop__lt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int16)
// A*D function (colscale): GB (_AxD__lt_int16)
// D*A function (rowscale): GB (_DxB__lt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int16)
// C=scalar+B GB (_bind1st__lt_int16)
// C=scalar+B' GB (_bind1st_tran__lt_int16)
// C=A+scalar GB (_bind2nd__lt_int16)
// C=A'+scalar GB (_bind2nd_tran__lt_int16)
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT16 || GxB_NO_LT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no accumulator and no mask.
// The eWise operator is LT: cij = (aij < bij), so C is boolean.
// Returns GrB_NO_VALUE when the LT/INT16 kernels are compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template expands using GB_BINOP / GB_GETA / GB_GETB defined above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// LT is not a valid accumulator monoid, so the template body is compiled
// out (#if 0) and this function is effectively a stub.
GrB_Info GB (_Cdense_accumB__lt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__lt_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// NOTE: the scalar-accumulate template below is compiled out (#if 0) for
// this operator, so this function reports success without touching C.
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D with the LT_INT16 op.
GrB_Info GB (_AxD__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values: the result of the int16 < comparison
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D with the LT_INT16 op.
GrB_Info GB (_DxB__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values: the result of the int16 < comparison
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (entries from the union of A and B).
GrB_Info GB (_AaddB__lt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B into parallel tasks; freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B (entry intersection).
GrB_Info GB (_AemultB_01__lt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
GrB_Info GB (_AemultB_02__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for LT_INT16, so only the #else branch is compiled.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form (any mask variant).
GrB_Info GB (_AemultB_bitmap__lt_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for each entry present in B (scalar bound as 1st arg).
GrB_Info GB (_bind1st__lt_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
int16_t x = (*((int16_t *) x_input)) ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// only operate on entries present in B's bitmap
if (GBB (Bb, pB))
{
int16_t bij = GBX (Bx, pB, false) ;
Cx [pB] = (x < bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for each entry present in A (scalar bound as 2nd arg).
GrB_Info GB (_bind2nd__lt_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only operate on entries present in A's bitmap
if (GBB (Ab, pA))
{
int16_t aij = GBX (Ax, pA, false) ;
Cx [pA] = (aij < y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply LT with the scalar bound first.
GrB_Info GB (_bind1st_tran__lt_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE (same definition as above)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply LT with the scalar bound second.
GrB_Info GB (_bind2nd_tran__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
calc_pi_atomic.c | #define _POSIX_C_SOURCE 200112L
//Johannes Hartmann, Jonas Einig
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <ctype.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
void getparams(int argc, char **argv, int *iterations);
int isNumber(char *number);
void calculatePi(int iterations);
int main(int argc, char **argv)
{
/* Parse the iteration count from argv (exits on bad input), then print
 * the lower/upper pi bounds. */
int iterations;
getparams(argc, argv, &iterations);
calculatePi(iterations);
return EXIT_SUCCESS;
}
/*
 * Parse argv[1] as the iteration count (must be >= 1); on any invalid
 * input, print an error and exit. Uses strtol instead of atoi because
 * atoi's behavior on overflow is undefined; values outside [1, INT_MAX]
 * are rejected explicitly.
 */
void getparams(int argc, char **argv, int *iterations)
{
    if (argc == 2 && isNumber(argv[1])) {
        char *end;
        long value = strtol(argv[1], &end, 10);
        /* isNumber() guarantees digits only, so *end is '\0' unless the
         * string was empty; the range check also catches strtol clamping
         * to LONG_MAX on overflow. */
        if (*end == '\0' && value >= 1 && value <= INT_MAX) {
            *iterations = (int) value;
            return;
        }
    }
    fprintf(stderr, "Please provide a positive integer (>= 1) as the only argument!\n");
    exit(EXIT_FAILURE);
}
/*
 * Return 1 if the string is a non-empty run of decimal digits, else 0.
 * Fixes: (a) isdigit() was called on a raw (possibly negative) char, which
 * is undefined behavior (CERT STR37-C); (b) the empty string was accepted.
 */
int isNumber(char *number)
{
    if (number[0] == '\0') {
        /* an empty string contains no digits, so it is not a number */
        return 0;
    }
    for (size_t i = 0; number[i] != '\0'; i++) {
        /* cast to unsigned char before isdigit() to avoid UB on
         * negative char values */
        if (!isdigit((unsigned char) number[i])) {
            return 0;
        }
    }
    return 1;
}
/*
 * Estimate pi by a Riemann sum of the quarter circle y = sqrt(1 - x^2) on
 * [0, 1]: the right-hand sum yields a lower bound and the left-hand sum an
 * upper bound, each multiplied by 4. Partial terms are accumulated with
 * OpenMP atomic updates (this file demonstrates the atomic construct).
 */
void calculatePi(int iterations)
{
    const double width = 1.0 / iterations;
    double lower = 0;
    double upper = 0;
#pragma omp parallel for
    for (int i = 1; i <= iterations; i++) {
        if (i < iterations) {
            const double rightTerm = sqrt(1 - pow(i * width, 2)) * width;
#pragma omp atomic
            lower += rightTerm;
        }
        const double leftTerm = sqrt(1 - pow((i - 1) * width, 2)) * width;
#pragma omp atomic
        upper += leftTerm;
    }
    lower = lower * 4;
    upper = upper * 4;
    printf("Lower Bound: %.12f\n", lower);
    printf("Upper Bound: %.12f\n", upper);
}
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
// Parenthesized so the macro expands safely inside larger #if expressions
// (e.g. "#if !MXNET_USE_CUDNN_DROPOUT" would misparse the bare && chain).
#define MXNET_USE_CUDNN_DROPOUT ((MXNET_USE_CUDNN == 1) && (CUDNN_MAJOR >= 7))
namespace dropout {
// index of the single forward input blob
enum DropoutOpInputs {kData};
// indices of the forward outputs: dropped-out data and the scaling mask
enum DropoutOpOutputs {kOut, kMask};
// resource slot for the parallel random generator (ctx.requested[kRandom])
enum DropoutOpForwardResource {kRandom};
// kTraining: drop only during training; kAlways: drop during inference too
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
// maximum tensor rank supported; not referenced in this header's visible
// code — presumably used by the .cc/.cu implementations (TODO confirm)
const int MAX_DIM = 5;
/*! \brief User-facing parameters of the Dropout operator. */
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
// fraction of the input dropped during training; constrained to [0, 1]
float p;
// one of dropout::kTraining / dropout::kAlways
int mode;
// axes along which the dropout mask is shared (variational dropout)
mxnet::TShape axes;
// optional switch to disable the cuDNN path (ignored when axes is set)
dmlc::optional<bool> cudnn_off;
DMLC_DECLARE_PARAMETER(DropoutParam) {
DMLC_DECLARE_FIELD(p).set_default(0.5)
.set_range(0, 1)
.describe("Fraction of the input that gets dropped out during training time.");
DMLC_DECLARE_FIELD(mode)
.add_enum("training", dropout::kTraining)
.add_enum("always", dropout::kAlways)
.set_default(dropout::kTraining)
.describe("Whether to only turn on dropout during training or to also turn on for inference.");
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
.describe("Axes for variational dropout kernel.");
DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
.describe("Whether to turn off cudnn in dropout operator. "
"This option is ignored if axes is specified.");
}
}; // struct DropoutParam
/*!
 * \brief Stateful dropout operator. In training (or kAlways mode) it zeroes
 * inputs with probability 1-pkeep_ and scales survivors by 1/pkeep_, also
 * emitting the mask used, so Backward can reuse it. Fast paths: MKL VSL on
 * CPU and cuDNN on GPU (when enabled at compile time); otherwise a generic
 * RNG kernel runs, with broadcast-multiply when axes_ requests a shared mask.
 */
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
// Fill r[0..n) with Bernoulli(p) samples via MKL VSL, splitting the range
// evenly across OpenMP threads; each thread skips ahead in the same stream.
static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
int n, double p, int* r) {
typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
const int seed = 17 + abs(genImpl.rand() % 4096);
CHECK_GE(seed, 0);
const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
{
const int ithr = omp_get_thread_num();
const int avg_amount = (n + nthr - 1) / nthr;
const int my_offset = ithr * avg_amount;
const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
if (my_amount > 0) {
VSLStreamStatePtr stream;
vslNewStream(&stream, VSL_BRNG_MCG31, seed);
vslSkipAheadStream(stream, my_offset);
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
vslDeleteStream(&stream);
}
}
}
static inline bool MKLAvailable() {
// BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer
// will be too small, so we can't use MKL in those cases
return sizeof(DType) >= sizeof(int);
}
// MKL forward pass
inline void MKLForward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data) {
Stream<xpu> *s = ctx.get_stream<xpu>();
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
DType *outptr = out.dptr_;
DType *dataptr = data.dptr_;
// the mask buffer is reused as int storage for the Bernoulli samples
// (safe because MKLAvailable() requires sizeof(DType) >= sizeof(int))
auto maskptr = reinterpret_cast<int *>(mask.dptr_);
int count = mask.shape_[0] * mask.shape_[1];
BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = 0; i < count; ++i) {
outptr[i] = dataptr[i] * maskptr[i] * pk_1;
}
}
// MKL backward pass: grad in = grad out * mask * 1/pkeep
inline void MKLBackward(const OpContext &ctx,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &out_grad) {
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
DType *ingradptr = gdata.dptr_;
const DType *outgradptr = grad.dptr_;
auto maskptr = reinterpret_cast<int *>(mask.dptr_);
int count = mask.shape_[0] * mask.shape_[1];
const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = 0; i < count; ++i) {
ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
}
}
#endif  // #if MXNET_USE_MKL_DROPOUT
public:
/*!
 * \brief Dropout kernel, compute dropout tensor
 */
struct DropoutKernel {
/*!
 * \brief Dropout kernel function
 * \param id Thread number (0-based representing count)
 * \param gen Random number generator
 * \param N Total number of items in the output
 * \param step Step between items, related to parallelism
 * \param dropout_out Output dropout values
 * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
 * \param input_data Input data to perform the dropout on
 * \param pkeep Dropout rate (keep when the generated random number is less than this value)
 */
MSHADOW_XINLINE static void Map(int id,
RandGenerator<xpu, DType> gen,
const int N,
const int step,
DType *dropout_out,
DType *mask_out,
const DType *input_data,
const real_t pkeep) {
RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
const real_t rand_num = static_cast<real_t>(genImpl.uniform());
// mask is 0 or 1/pkeep, so the kept outputs are pre-scaled
mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
dropout_out[i] = input_data[i] * mask_out[i];
});
}
};
struct BernoulliKernel {
/*! \brief Bernoulli kernel for generating mask */
MSHADOW_XINLINE static void Map(int id,
RandGenerator<xpu, DType> gen,
const int N,
const int step,
DType *mask_out,
const real_t pkeep) {
RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
const real_t rand_num = static_cast<real_t>(genImpl.uniform());
mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
});
}
};
// Construct from parameters; creates cuDNN descriptors eagerly when the
// cuDNN path may be used for this context.
explicit DropoutOp(const DropoutParam &param, Context ctx) {
this->pkeep_ = 1.0f - param.p;
this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
this->axes_ = param.axes;
this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
this->ctx_ = ctx;
if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
dtype_ = mshadow::DataType<DType>::kCudnnFlag;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
}
#endif  // MXNET_USE_CUDNN_DROPOUT
}
// Destroy the cuDNN descriptors under the same condition they were created.
~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
}
#endif  // MXNET_USE_CUDNN_DROPOUT
}
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
inline bool CuDNNAvailable() {
return this->pkeep_ > 0 && !this->cudnn_off_;
}
// cuDNN forward: the whole tensor is described as a flat 1x1x1xN layout.
inline void CuDNNForward(const OpContext &ctx,
const TBlob &in,
const TBlob &mask,
const TBlob &out) {
Stream<xpu> *s = ctx.get_stream<xpu>();
// set dropout state.
ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);
// describe input/output tensor
int dim[4], stride[4];
dim[0] = 1;
dim[1] = 1;
dim[2] = 1;
dim[3] = out.Size();
stride[0] = out.Size();
stride[1] = out.Size();
stride[2] = out.Size();
stride[3] = 1;
CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
dtype_,
4,
dim,
stride));
CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
dtype_,
4,
dim,
stride));
// perform dropout with cudnn
CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
// cudnn uses bits to record the positions that are dropped, so reserve bytes is always
// 1/8 of input size.
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
"The size of the mask space is smaller than the required cudnn reserved space.";
CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
dropout_desc_,
x_desc_,
in.dptr<DType>(),
y_desc_,
out.dptr<DType>(),
mask.dptr<DType>(),
dropout_reserve_byte_));
}
// cuDNN backward: reuses the reserve space written by CuDNNForward.
inline void CuDNNBackward(const OpContext &ctx,
const TBlob &out_grad,
const TBlob &mask,
const TBlob &in_grad) {
Stream<xpu> *s = ctx.get_stream<xpu>();
// describe input/output tensor
int dim[4], stride[4];
dim[0] = 1;
dim[1] = 1;
dim[2] = 1;
dim[3] = in_grad.Size();
stride[0] = in_grad.Size();
stride[1] = in_grad.Size();
stride[2] = in_grad.Size();
stride[3] = 1;
CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
dtype_,
4,
dim,
stride));
CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
dtype_,
4,
dim,
stride));
// perform dropout with cudnn
CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
dropout_desc_,
dy_desc_,
out_grad.dptr<DType>(),
dx_desc_,
in_grad.dptr<DType>(),
mask.dptr<DType>(),
dropout_reserve_byte_));
}
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// Forward: choose MKL / cuDNN / RNG-kernel / broadcast paths; falls back to
// identity copy when dropout is inactive (inference or pkeep_ == 1).
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
this->dropout_passthrough_ = true;
if (req[dropout::kOut] != kNullOp) {
CHECK_EQ(in_data.size(), 1U);
if (ctx.is_train) {
CHECK_EQ(out_data.size(), 2U);
}
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob &in = in_data[dropout::kData];
const TBlob &out = out_data[dropout::kOut];
const TBlob &mask = out_data[dropout::kMask];
if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
this->dropout_passthrough_ = false;
if (this->axes_.ndim() == 0) {
// element-wise mask: try the fast paths first
#if MXNET_USE_MKL_DROPOUT
if (MKLAvailable()) {
MKLForward(ctx, in_data, out_data);
return;
}
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
if (CuDNNAvailable()) {
CuDNNForward(ctx, in, mask, out);
return;
}
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
CHECK(req[dropout::kOut] != kAddTo);
LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
out.dptr<DType>(),
mask.dptr<DType>(),
in.dptr<DType>(),
this->pkeep_);
return;
} else {
// variational dropout: mask has the reduced shape, then broadcast-mul
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
// initialize the mask
LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
mask.dptr<DType>(),
this->pkeep_);
// broadcast mul
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(in.shape_,
mask.shape_, out.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
// shapes collapsed to identical layouts: plain element-wise multiply
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
mask.dptr<DType>());
});
} else {
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
lstride, rstride, oshape,
in.dptr<DType>(),
mask.dptr<DType>(), out.dptr<DType>());
});
}
}
} else {
// dropout inactive: pass the input straight through
if (req[dropout::kOut] == kWriteInplace) return;
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
});
}
}
}
// Backward: multiply the output gradient by the stored mask (same path
// selection as Forward); identity copy when Forward was a pass-through.
void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
if (!this->dropout_passthrough_) {
this->dropout_passthrough_ = true;
const TBlob &gdata = in_grad[dropout::kData];
const TBlob &grad = out_grad[dropout::kOut];
const TBlob &mask = out_data[dropout::kMask];
if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
if (MKLAvailable()) {
MKLBackward(ctx, in_grad, out_data, out_grad);
return;
}
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
if (CuDNNAvailable()) {
CuDNNBackward(ctx, grad, mask, gdata);
return;
}
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// standard case for dropout
CHECK_EQ(grad.Size(), mask.Size());
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
});
return;
} else {
// broadcast mul (mask has the reduced variational-dropout shape)
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(grad.shape_,
mask.shape_, gdata.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
});
} else {
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape,
grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
});
}
}
} else {
const TBlob& gdata = in_grad[dropout::kData];
const TBlob& grad = out_grad[dropout::kOut];
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
});
}
}
private:
/*! \brief Dropout rate (keep when the generated random number is less than this value) */
real_t pkeep_;
/*! \brief Dropout mode */
dropout::DropoutOpMode mode_;
/*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
mxnet::TShape axes_;
/*! \brief Flag to record whether forward is executed in pass-through mode */
bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
bool cudnn_off_;
Context ctx_;
cudnnDataType_t dtype_;
cudnnDropoutDescriptor_t dropout_desc_;
uint64_t seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
size_t dropout_reserve_byte_;
cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp
/*! \brief Forward entry point: dispatch on the input dtype and run the
 * stateful DropoutOp held in the operator state. */
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Forward(ctx, inputs, req, outputs);
});
}
/*! \brief Backward entry point. inputs[0] is the output gradient and
 * inputs[1] the mask saved by Forward; they are placed into the slots
 * (kOut / kMask) that DropoutOp::Backward expects. */
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1);
CHECK_EQ(req.size(), 1);
std::vector<TBlob> out_grads(2);
std::vector<TBlob> out_data(2);
out_grads[dropout::kOut] = inputs[0];
out_data[dropout::kMask] = inputs[1];
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Backward(ctx, out_grads, out_data, req, outputs);
});
}
} // namespace op
} // namespace mxnet
#undef MXNET_USE_MKL_DROPOUT
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
lotus5_fmt_plug.c | //original work by Jeff Fay
//some optimisations by bartavelle at bandecon.com
/* OpenMP support and further optimizations (including some code rewrites)
* by Solar Designer */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus5);
#else
#include <stdio.h>
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
/* LOTUS_N independent password streams are processed in interleaved
 * fashion by the crypt loop: 3 on x86-64, 2 elsewhere (presumably tuned
 * to register pressure -- see lotus_transform_password / lotus_mix). */
#ifdef __x86_64__
#define LOTUS_N 3
#define LOTUS_N_STR " X3"
#else
#define LOTUS_N 2
#define LOTUS_N_STR " X2"
#endif
/*preprocessor constants that John The Ripper likes*/
#define FORMAT_LABEL "lotus5"
#define FORMAT_NAME "Lotus Notes/Domino 5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR LOTUS_N_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT LOTUS_N
/* Must be divisible by any LOTUS_N (thus, by 2 and 3) */
#define MAX_KEYS_PER_CRYPT 0x900
/* Self-test/benchmark vectors: {32-char uppercase hex hash, plaintext} */
static struct fmt_tests tests[] = {
{"06E0A50B579AD2CD5FFDC48564627EE7", "secret"},
{"355E98E7C7B59BD810ED845AD0FD2FC4", "password"},
{"CD2D90E8E00D8A2A63A81F531EA8A9A3", "lotus"},
{"69D90B46B1AC0912E5CCF858094BBBFC", "dirtydog"},
{NULL}
};
/* The Lotus/Domino "magic" S-box: 256 entries followed by a copy of the
 * first 48, so lookups of the form table[j + t] in lotus_mix (j <= 48,
 * t <= 0xff, max index 303) stay in bounds without masking. */
static const unsigned char lotus_magic_table[] = {
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36
};
/* Per-candidate buffers, allocated in init(): computed 16-byte hashes and
 * the NUL-terminated candidate passwords */
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static void init(struct fmt_main *self)
{
/* Allocate the per-candidate buffers; with OpenMP, scale the minimum
 * batch size to 2 keys per thread (capped at the configured maximum). */
#ifdef _OPENMP
int n = omp_get_max_threads();
if (n < 1)
n = 1;
n *= 2;
if (n > self->params.max_keys_per_crypt)
n = self->params.max_keys_per_crypt;
self->params.min_keys_per_crypt = n;
#endif
crypt_key = mem_calloc_align(sizeof(*crypt_key),
self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
}
static void done(void)
{
/* Release the buffers allocated in init() */
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
/*Utility function to convert hex to bin */
/* Decode the 32-char hex ciphertext into its 16-byte binary form.
 * Returns a pointer to a static buffer (JtR convention). */
static void * get_binary(char *ciphertext)
{
	static ARCH_WORD_32 out[BINARY_SIZE/4];
	char *realcipher = (char*)out;
	int i = 0;
	while (i < BINARY_SIZE) {
		/* two hex nibbles per output byte */
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2])]*16 +
		    atoi16[ARCH_INDEX(ciphertext[i*2+1])];
		i++;
	}
	return (void*)out;
}
/*Another function required by JTR: decides whether we have a valid
* ciphertext */
static int
valid (char *ciphertext, struct fmt_main *self)
{
int i;
for (i = 0; i < CIPHERTEXT_LENGTH; i++)
if (!(((ciphertext[i] >= '0') && (ciphertext[i] <= '9'))
//|| ((ciphertext[i] >= 'a') && (ciphertext[i] <= 'f'))
|| ((ciphertext[i] >= 'A') && (ciphertext[i] <= 'F'))))
{
return 0;
}
return !ciphertext[i];
}
/*sets the value of saved_key so we can play with it*/
static void set_key (char *key, int index)
{
/* Store a copy of the candidate, truncated to PLAINTEXT_LENGTH chars */
strnzcpy (saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/*retrieves the saved key; used by JTR*/
static char * get_key (int index)
{
/* Return the candidate exactly as stored by set_key() */
return saved_key[index];
}
/* Return nonzero if any of the computed hashes matches the candidate
 * binary (full 16-byte comparison). */
static int cmp_all (void *binary, int count)
{
	int i = 0;
	while (i < count) {
		if (memcmp(binary, crypt_key[i], BINARY_SIZE) == 0)
			return 1;
		i++;
	}
	return 0;
}
/* Exact BINARY_SIZE-byte comparison for one candidate. */
static int cmp_one (void *binary, int index)
{
	return memcmp(crypt_key[index], binary, BINARY_SIZE) == 0;
}
/* Nothing further to verify: cmp_one() already compared the full
 * BINARY_SIZE bytes, so a cmp_one match is definitive. */
static int cmp_exact (char *source, int index)
{
	return 1;
}
/*Beginning of private functions*/
/* Takes the plaintext password and generates the second row of our
 * working matrix for the final call to the mixing function*/
/* Feedback S-box pass: each output byte is magic[input ^ previous_output],
 * over 16 bytes per candidate (two bytes per loop iteration). LOTUS_N
 * (2 or 3) candidates are processed in lockstep so the table lookups of
 * the independent streams interleave and hide latency. */
static void MAYBE_INLINE
#if LOTUS_N == 3
lotus_transform_password (unsigned char *i0, unsigned char *o0,
                          unsigned char *i1, unsigned char *o1,
                          unsigned char *i2, unsigned char *o2)
#else
lotus_transform_password (unsigned char *i0, unsigned char *o0,
                          unsigned char *i1, unsigned char *o1)
#endif
{
	unsigned char t0, t1; /* feedback byte per stream */
#if LOTUS_N == 3
	unsigned char t2;
#endif
	int i;
#if LOTUS_N == 3
	t0 = t1 = t2 = 0;
#else
	t0 = t1 = 0;
#endif
	for (i = 0; i < 8; i++)
	{
		t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
		t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
		t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
		t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
		t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
		t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
	}
}
/* The mixing function: perturbs the first three rows of the matrix*/
/* 18 rounds over the 48-byte prefix of each state: every byte is XORed
 * with magic[j + feedback], where j counts down 48..1 and the feedback
 * byte chains through the whole pass. LOTUS_N candidate states are mixed
 * in lockstep (two bytes per inner iteration). */
#if LOTUS_N == 3
static void lotus_mix (unsigned char *m0, unsigned char *m1,
                       unsigned char *m2)
#else
static void lotus_mix (unsigned char *m0, unsigned char *m1)
#endif
{
	unsigned char t0, t1; /* chained feedback per stream */
	unsigned char *p0, *p1;
#if LOTUS_N == 3
	unsigned char t2;
	unsigned char *p2;
#endif
	int i, j;
#if LOTUS_N == 3
	t0 = t1 = t2 = 0;
#else
	t0 = t1 = 0;
#endif
	for (i = 18; i > 0; i--)
	{
		p0 = m0;
		p1 = m1;
#if LOTUS_N == 3
		p2 = m2;
#endif
		for (j = 48; j > 0; j--)
		{
			t0 = p0[0] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
			t1 = p1[0] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
#if LOTUS_N == 3
			t2 = p2[0] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
#endif
			j--; /* second byte of the pair uses the decremented j */
			t0 = p0[1] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
			p0 += 2;
			t1 = p1[1] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
			p1 += 2;
#if LOTUS_N == 3
			t2 = p2[1] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
			p2 += 2;
#endif
		}
	}
}
/*the last public function; generates ciphertext*/
/* Hash the batch, LOTUS_N (2 or 3) candidates per loop step so the
 * table-lookup streams interleave. Each candidate's 64-byte state is a
 * 4x16 matrix u.m4:
 *   row 0 - accumulator row (starts zeroed; becomes the output)
 *   row 1 - password padded to 16 with the byte (16 - len) (PKCS#7-like)
 *   row 2 - scratch: copy of row 1, later row0 XOR row1
 *   row 3 - transformed password
 * NOTE(review): the loop reads saved_key[index+1] (and +2); this assumes
 * *pcount is always a multiple of LOTUS_N -- confirm min/max_keys_per_crypt
 * guarantee that. The memset of row 1 with PLAINTEXT_LENGTH bytes also
 * assumes PLAINTEXT_LENGTH == 16 (the row width) -- TODO confirm. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += LOTUS_N) {
		struct {
			union {
				unsigned char m[64];
				unsigned char m4[4][16];
				ARCH_WORD m4w[4][16 / ARCH_SIZE];
			} u;
		} ctx[LOTUS_N];
		int password_length;
		/* build rows 0..2 for candidate 0 */
		memset(ctx[0].u.m4[0], 0, 16);
		password_length = strlen(saved_key[index]);
		memset(ctx[0].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
		memcpy(ctx[0].u.m4[1], saved_key[index], password_length);
		memcpy(ctx[0].u.m4[2], ctx[0].u.m4[1], 16);
		/* same for candidate 1 */
		memset(ctx[1].u.m4[0], 0, 16);
		password_length = strlen(saved_key[index + 1]);
		memset(ctx[1].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
		memcpy(ctx[1].u.m4[1], saved_key[index + 1], password_length);
		memcpy(ctx[1].u.m4[2], ctx[1].u.m4[1], 16);
#if LOTUS_N == 3
		/* and candidate 2 */
		memset(ctx[2].u.m4[0], 0, 16);
		password_length = strlen(saved_key[index + 2]);
		memset(ctx[2].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
		memcpy(ctx[2].u.m4[1], saved_key[index + 2], password_length);
		memcpy(ctx[2].u.m4[2], ctx[2].u.m4[1], 16);
		/* row 3 = S-box transform of the padded password, then mix rows 0-2 */
		lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
		                         ctx[1].u.m4[1], ctx[1].u.m4[3],
		                         ctx[2].u.m4[1], ctx[2].u.m4[3]);
		lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
		lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
		                         ctx[1].u.m4[1], ctx[1].u.m4[3]);
		lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
		/* row 1 <- transformed password */
		memcpy(ctx[0].u.m4[1], ctx[0].u.m4[3], 16);
		memcpy(ctx[1].u.m4[1], ctx[1].u.m4[3], 16);
#if LOTUS_N == 3
		memcpy(ctx[2].u.m4[1], ctx[2].u.m4[3], 16);
#endif
		{
			/* row 2 <- row 0 XOR row 1, word-at-a-time */
			int i;
			for (i = 0; i < 16 / ARCH_SIZE; i++) {
				ctx[0].u.m4w[2][i] = ctx[0].u.m4w[0][i] ^ ctx[0].u.m4w[1][i];
				ctx[1].u.m4w[2][i] = ctx[1].u.m4w[0][i] ^ ctx[1].u.m4w[1][i];
#if LOTUS_N == 3
				ctx[2].u.m4w[2][i] = ctx[2].u.m4w[0][i] ^ ctx[2].u.m4w[1][i];
#endif
			}
		}
		/* second mixing pass; row 0 then holds the digest */
#if LOTUS_N == 3
		lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
		lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
		memcpy(crypt_key[index], ctx[0].u.m4[0], BINARY_SIZE);
		memcpy(crypt_key[index + 1], ctx[1].u.m4[0], BINARY_SIZE);
#if LOTUS_N == 3
		memcpy(crypt_key[index + 2], ctx[2].u.m4[0], BINARY_SIZE);
#endif
	}
	return count;
}
/* Partial-hash accessors over the computed hashes, one per hash-table
 * size bucket (PH_MASK_0..6). */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* Matching partial-hash accessors over a loaded binary. */
static int binary_hash_0(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_0; }
static int binary_hash_1(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_1; }
static int binary_hash_2(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_2; }
static int binary_hash_3(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_3; }
static int binary_hash_4(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_4; }
static int binary_hash_5(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_5; }
static int binary_hash_6(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_6; }
/* C's version of a class specifier */
/* Format descriptor registered with the JtR core: static parameters
 * first, then the method table wired to the functions above. */
struct fmt_main fmt_lotus5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt, /* unsalted format: default salt handlers */
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
pi_omp_sumvector.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* Parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#if _EXTRAE_
#include "extrae_user_events.h"
// Extrae Constants
#define PROGRAM 1000
#define END 0
#define SERIAL 1
#define PARALLEL 2
#else
/* Wall-clock timestamp in microseconds (gettimeofday-based); used by the
 * START/STOP_COUNT_TIME macros when Extrae instrumentation is disabled. */
double getusec_() {
	struct timeval time;
	gettimeofday(&time, NULL);
	return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
#endif
#define NUMTHRDS 16
/* Per-thread partial sums, indexed by omp_get_thread_num().
 * NOTE(review): adjacent doubles share cache lines (false sharing), and
 * the bounds assume the OpenMP team never exceeds NUMTHRDS threads --
 * confirm the thread count is capped before the parallel region. */
double sumvector[NUMTHRDS]; // sum for each thread
/*
 * Approximate pi = integral of 4/(1+x^2) over [0,1] by a midpoint
 * Riemann sum of num_steps rectangles. Each OpenMP thread accumulates
 * into its own slot of sumvector[], reduced serially afterwards.
 *
 * argv[1]: number of steps (positive integer; try 1000000000).
 * Returns EXIT_SUCCESS; exits with status 1 on bad usage/input.
 */
int main(int argc, char *argv[]) {
#if _EXTRAE_
	Extrae_event (PROGRAM, SERIAL);
#else
	double stamp;
	START_COUNT_TIME;
#endif
	double x, sum = 0.0, pi = 0.0;
	double step;
	const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";

	if (argc < 2) {
		/* fputs, not fprintf(stderr, Usage): never pass a non-literal
		 * string as a format string. */
		fputs(Usage, stderr);
		exit(1);
	}
	/* strtol instead of atoi: atoi truncates through int, silently
	 * mangling step counts above INT_MAX, and reports no errors. */
	char *end;
	long int num_steps = strtol(argv[1], &end, 10);
	if (end == argv[1] || *end != '\0' || num_steps < 1) {
		fputs(Usage, stderr);
		exit(1);
	}
	step = 1.0/(double) num_steps;
#if _EXTRAE_
	Extrae_event (PROGRAM, END);
#endif
	/* do computation -- using all available threads */
#if _EXTRAE_
	Extrae_event (PROGRAM, PARALLEL);
#endif
	for (int i=0; i<NUMTHRDS; i++)
		sumvector[i] = 0.0;
	/* sumvector has exactly NUMTHRDS slots: cap the team size so
	 * omp_get_thread_num() can never index out of bounds. */
	if (omp_get_max_threads() > NUMTHRDS)
		omp_set_num_threads(NUMTHRDS);
#pragma omp parallel private(x)
	{
		int myid = omp_get_thread_num();
#pragma omp for
		for (long int i=0; i<num_steps; ++i) {
			x = (i+0.5)*step;
			sumvector[myid] += 4.0/(1.0+x*x);
		}
	}
	/* serial reduction of the per-thread partial sums */
	for (int i=0; i<NUMTHRDS; i++)
		sum += sumvector[i];
#if _EXTRAE_
	Extrae_event (PROGRAM, END);
	Extrae_event (PROGRAM, SERIAL);
#endif
	pi = step * sum;
	/* print results */
	printf("Number pi after %ld iterations = %.15f\n", num_steps, pi);
#if _EXTRAE_
	Extrae_event (PROGRAM, END);
#else
	STOP_COUNT_TIME("Total execution time");
#endif
	return EXIT_SUCCESS;
}
|
residual_based_pseudo_static_displacement_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME )
#define KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bossak_displacement_scheme.hpp"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
 * @class ResidualBasedPseudoStaticDisplacementScheme
 * @ingroup KratosCore
 * @brief This is a pseudo-static scheme
 * @details For pseudo-static strategy: calculate the constant matrices D = Beta * M, "set" M = 0 after initializing the damping matrix
 * @note Based on Riccardo Rossi PhD Thesis: Light weight Structures: Structural Analysis and Coupling Issues
 * @author Vicente Mataix Ferrandiz
 */
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedPseudoStaticDisplacementScheme
    : public ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedPseudoStaticDisplacementScheme );
    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename Element::DofsVectorType DofsVectorType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef typename BaseType::Pointer BaseTypePointer;
    typedef ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace> DerivedBaseType;
    typedef typename BaseType::LocalSystemComponents LocalSystemComponentsType;
    // Threshold under which a damping matrix norm is treated as zero
    static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
    ///@}
    ///@name Life Cycle
    ///@{
    /**
     * @brief Constructor. The pseudo static scheme (parameters)
     * @param ThisParameters Parameters with the Rayleigh variable
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(Parameters ThisParameters)
        : DerivedBaseType(0.0), // base Bossak scheme with alpha = 0
          mRayleighBeta(NODAL_MAUX) // placeholder; reassigned below after validation
    {
        // Validate default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "name"                   : "ResidualBasedPseudoStaticDisplacementScheme",
            "rayleigh_beta_variable" : "RAYLEIGH_BETA"
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);
        mRayleighBeta = KratosComponents<Variable<double>>::Get(ThisParameters["rayleigh_beta_variable"].GetString());
    }
    /**
     * @brief Default constructor. The pseudo static scheme
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(const Variable<double>& RayleighBetaVariable)
        :DerivedBaseType(0.0),
        mRayleighBeta(RayleighBetaVariable)
    {
    }
    /** Copy Constructor.
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(ResidualBasedPseudoStaticDisplacementScheme& rOther)
        :DerivedBaseType(rOther),
        mRayleighBeta(rOther.mRayleighBeta)
    {
    }
    /**
     * Clone
     */
    BaseTypePointer Clone() override
    {
        return BaseTypePointer( new ResidualBasedPseudoStaticDisplacementScheme(*this) );
    }
    /** Destructor.
     */
    ~ResidualBasedPseudoStaticDisplacementScheme() override
    {
    }
    ///@}
    ///@name Operators
    ///@{
    ///@}
    ///@name Operations
    ///@{
    /**
     * @brief Performing the update of the solution
     * @details Incremental update within newton iteration. It updates the state variables at the end of the time step u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u
     * @note Pseudo-static: only the velocity is updated from the displacement
     * increment; the acceleration is not touched here.
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;
        DerivedBaseType::mpDofUpdater->UpdateDofs(rDofSet, rDx);
        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
        const auto it_node_begin = rModelPart.Nodes().begin();
        array_1d<double, 3 > delta_displacement;
        #pragma omp parallel for private(delta_displacement)
        for(int i = 0; i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;
            noalias(delta_displacement) = it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
            array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
            const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            // v_{n+1} = c1 * (u_{n+1} - u_n) - c4 * v_n (Bossak coefficients)
            noalias(r_current_velocity) = (DerivedBaseType::mBossak.c1 * delta_displacement - DerivedBaseType::mBossak.c4 * r_previous_velocity);
        }
        KRATOS_CATCH( "" );
    }
    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;
        // Process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // Delta time
        const double delta_time = r_current_process_info[DELTA_TIME];
        // Updating time derivatives (nodally for efficiency)
        const auto it_node_begin = rModelPart.Nodes().begin();
        const int num_nodes = static_cast<int>(rModelPart.NumberOfNodes());
        // Auxiliar variables
        const array_1d<double, 3> zero_array = ZeroVector(3);
        array_1d<double, 3 > delta_displacement = zero_array;
        bool predicted_x, predicted_y, predicted_z;
        // Getting position
        // DOF positions are queried once on the first node and assumed uniform
        // across the mesh; -1 flags a missing DOF.
        const int disppos_x = it_node_begin->HasDofFor(DISPLACEMENT_X) ? it_node_begin->GetDofPosition(DISPLACEMENT_X) : -1;
        const int velpos_x = it_node_begin->HasDofFor(VELOCITY_X) ? it_node_begin->GetDofPosition(VELOCITY_X) : -1;
        const int disppos_y = it_node_begin->HasDofFor(DISPLACEMENT_Y) ? it_node_begin->GetDofPosition(DISPLACEMENT_Y) : -1;
        const int velpos_y = it_node_begin->HasDofFor(VELOCITY_Y) ? it_node_begin->GetDofPosition(VELOCITY_Y) : -1;
        const int disppos_z = it_node_begin->HasDofFor(DISPLACEMENT_Z) ? it_node_begin->GetDofPosition(DISPLACEMENT_Z) : -1;
        const int velpos_z = it_node_begin->HasDofFor(VELOCITY_Z) ? it_node_begin->GetDofPosition(VELOCITY_Z) : -1;
        #pragma omp parallel for private(delta_displacement, predicted_x, predicted_y, predicted_z)
        for(int i = 0; i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;
            predicted_x = false;
            predicted_y = false;
            predicted_z = false;
            //Predicting: r_current_displacement = r_previous_displacement + r_previous_velocity * delta_time;
            //ATTENTION::: the prediction is performed only on free nodes
            const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            const array_1d<double, 3>& r_previous_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
            array_1d<double, 3>& r_current_acceleration = it_node->FastGetSolutionStepValue(ACCELERATION);
            array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
            array_1d<double, 3>& r_current_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);
            // If the velocity is fixed, back out the displacement that realizes it
            if (velpos_x > -1) {
                if (it_node->GetDof(VELOCITY_X, velpos_x).IsFixed()) {
                    delta_displacement[0] = (r_current_velocity[0] + DerivedBaseType::mBossak.c4 * r_previous_velocity[0])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[0] = r_previous_displacement[0] + delta_displacement[0];
                    predicted_x = true;
                }
            }
            // Otherwise, for free displacement DOFs, explicit Euler prediction
            if (disppos_x > -1 && !predicted_x) {
                if (!it_node->GetDof(DISPLACEMENT_X, disppos_x).IsFixed() && !predicted_x) {
                    r_current_displacement[0] = r_previous_displacement[0] + delta_time * r_previous_velocity[0];
                }
            }
            if (velpos_y > -1) {
                if (it_node->GetDof(VELOCITY_Y, velpos_y).IsFixed()) {
                    delta_displacement[1] = (r_current_velocity[1] + DerivedBaseType::mBossak.c4 * r_previous_velocity[1])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[1] = r_previous_displacement[1] + delta_displacement[1];
                    predicted_y = true;
                }
            }
            if (disppos_y > -1 && !predicted_y) {
                if (!it_node->GetDof(DISPLACEMENT_Y, disppos_y).IsFixed() && !predicted_y) {
                    r_current_displacement[1] = r_previous_displacement[1] + delta_time * r_previous_velocity[1];
                }
            }
            if (velpos_z > -1) {
                if (it_node->GetDof(VELOCITY_Z, velpos_z).IsFixed()) {
                    delta_displacement[2] = (r_current_velocity[2] + DerivedBaseType::mBossak.c4 * r_previous_velocity[2])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[2] = r_previous_displacement[2] + delta_displacement[2];
                    predicted_z = true;
                }
            }
            if (disppos_z > -1 && !predicted_z) {
                if (!it_node->GetDof(DISPLACEMENT_Z, disppos_z).IsFixed() && !predicted_z) {
                    r_current_displacement[2] = r_previous_displacement[2] + delta_time * r_previous_velocity[2];
                }
            }
            // Updating time derivatives
            noalias(r_current_acceleration) = zero_array; // pseudo-static: no inertia
            noalias(r_current_velocity) = r_previous_velocity;
        }
        KRATOS_CATCH( "" );
    }
    ///@}
    ///@name Access
    ///@{
    ///@}
    ///@name Inquiry
    ///@{
    ///@}
    ///@name Input and output
    ///@{
    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedPseudoStaticDisplacementScheme";
    }
    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }
    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info() << ". Considering the following damping variable " << mRayleighBeta;
    }
    ///@}
    ///@name Friends
    ///@{
protected:
    ///@}
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Protected Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{
    ///@}
    ///@name Protected Operations
    ///@{
    /**
     * @brief It adds the dynamic LHS contribution of the elements D*c1 + K
     * @details If no explicit damping matrix is provided, a Rayleigh damping
     * matrix Beta * M is used instead (Beta read from the process info).
     * @param rLHSContribution The dynamic contribution for the LHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToLHS(
        LocalSystemMatrixType& rLHSContribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Adding damping contribution
        if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) // if D matrix declared
            noalias(rLHSContribution) += rD * DerivedBaseType::mBossak.c1;
        else if (rM.size1() != 0) {
            const double beta = rCurrentProcessInfo[mRayleighBeta];
            noalias(rLHSContribution) += rM * beta * DerivedBaseType::mBossak.c1;
        }
    }
    /**
     * @brief It adds the dynamic RHS contribution of the elements b - D*v
     * @param rElement The element to compute
     * @param rRHSContribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Element& rElement,
        LocalSystemVectorType& rRHSContribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Per-thread scratch vector index
        const std::size_t this_thread = OpenMPUtils::ThisThread();
        // Adding damping contribution
        if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
            rElement.GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
        } else if (rM.size1() != 0) {
            // Rayleigh damping fallback: D = beta * M
            const double beta = rCurrentProcessInfo[mRayleighBeta];
            rElement.GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
        }
    }
    /**
     * @brief It adds the dynamic RHS contribution of the condition b - M*a - D*v
     * @param rCondition The condition to compute
     * @param rRHSContribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Condition& rCondition,
        LocalSystemVectorType& rRHSContribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        const std::size_t this_thread = OpenMPUtils::ThisThread();
        // Adding damping contribution
        // Damping contribution
        if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
            rCondition.GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
        } else if (rM.size1() != 0) {
            const double beta = rCurrentProcessInfo[mRayleighBeta];
            rCondition.GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
        }
    }
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    Variable<double> mRayleighBeta; /// The Rayleigh Beta variable
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@}
    ///@name Serialization
    ///@{
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{
    ///@}
}; /* Class ResidualBasedPseudoStaticDisplacementScheme */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME defined */
|
GB_unaryop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_int8
// op(A') function: GB_tran__minv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = GB_IMINV_SIGNED ((int8_t) Ax [p], 8) for all p in [0, anz),
// parallelized over nthreads with a static schedule. Returns GrB_NO_VALUE
// when this operator/type pair is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_int8_int8
(
    int8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unaryop_transpose.c,
// driven by the GB_* macros defined above for this operator/type pair.
GrB_Info GB_tran__minv_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
// Single access point for the user-set Eigen thread count. SetAction stores
// *v; GetAction writes back the stored value, or (if unset / <= 0) the
// OpenMP default — 1 when OpenMP is disabled.
// NOTE(review): the static local is unsynchronized; concurrent Set/Get from
// multiple threads is a data race — confirm callers serialize setNbThreads.
inline void manage_multi_threading(Action action, int *v) {
  static int m_maxThreads = -1; // -1 means "not set by the user yet"
  EIGEN_UNUSED_VARIABLE(m_maxThreads);
  if (action == SetAction) {
    eigen_internal_assert(v != 0);
    m_maxThreads = *v;
  } else if (action == GetAction) {
    eigen_internal_assert(v != 0);
#ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
#else
    *v = 1;
#endif
  } else {
    eigen_internal_assert(false);
  }
}
}
/** Must be call first when calling Eigen from multiple threads */
// Touches the lazily-initialized statics (thread-count and cache-size
// managers) once, so their first initialization does not race later.
inline void initParallel() {
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
 * \sa setNbThreads */
inline int nbThreads() {
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}
/** Sets the max number of threads reserved for Eigen
 * \sa nbThreads
 * \note Pass a value <= 0 to restore the OpenMP default. */
inline void setNbThreads(int v) {
  internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
// Per-thread coordination record for the parallel GEMM: `sync`/`users` are
// volatile hand-rolled synchronization fields (sync starts at -1 = "packing
// not done"), and lhs_start/lhs_length describe this thread's slice of the
// LHS panel.
template<typename Index>
struct GemmParallelInfo {
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
  Index volatile sync;
  int volatile users;
  Index lhs_start;
  Index lhs_length;
};
// Run the product functor either sequentially or split column-wise (row-wise
// when `transpose`) across an OpenMP team sized by heuristics on the problem
// dimensions. Falls back to a single sequential call when OpenMP is off,
// BLAS is used, the heuristics pick one thread, or we are already inside a
// parallel region.
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor &func, Index rows, Index cols, Index depth, bool transpose) {
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0, rows, 0, cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough
  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000; // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);
  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);
  Eigen::initParallel();
  func.initParallelSession(threads);
  if(transpose)
    std::swap(rows,cols);
  // One GemmParallelInfo per thread, stack-allocated via Eigen's helper
  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();
    // Column blocks rounded down to a multiple of 4, row blocks to a
    // multiple of the kernel's mr; the last thread takes the remainder.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;
    if(transpose)
      func(c0, actualBlockCols, 0, rows, info);
    else
      func(0, rows, c0, actualBlockCols, info);
  }
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
TTeMPS_tangent_orth_omega_openmp.c | /* DOES NOT WORK PROPERLY, AS GETTING OPENMP SUPPORT IN MATLAB IS DIFFICULT. */
/*mex -lmwlapack -lmwblas -largeArrayDims partXY_blas.c */
/*
* TTeMPS Toolbox.
* Michael Steinlechner, 2013-2016
* Questions and contact: michael.steinlechner@epfl.ch
* BSD 2-clause license, see LICENSE.txt
*/
#define U_SLICE(i,j) &U[i][(ind[d*j+i]-1)*r[i]*r[i+1]]
#define V_SLICE(i,j) &V[i][(ind[d*j+i]-1)*r[i]*r[i+1]]
/*#define RES_SLICE(i,j) &result[i][(ind[d*j+i]-1)*r[i]*r[i+1]]*/
#define RES_SLICE(i,j) &result_part[i][(ind[d*j+i]-1)*r[i]*r[i+1]]
#include "mex.h"
#include "blas.h"
#include <omp.h>
/* calling:
TTeMPS_tangent_omega( n, r, Cores, ind, vals)
*/
/*
 * MEX gateway: for each of numSubsref sampled tensor entries, accumulate a
 * rank-one (outer-product) update into the gradient of every tensor-train
 * core -- appears to compute dU{i} += vals(j) * leftProduct * rightProduct'
 * for a TT/MPS decomposition (TODO: confirm against the calling M-file).
 *
 * Inputs (prhs):
 *   prhs[0]  n    -- mode sizes, length d (arrives as double, cast below)
 *   prhs[1]  r    -- TT ranks, length d+1
 *   prhs[2]  U    -- cell array of d "left" core matrices
 *   prhs[3]  V    -- cell array of d "right" core matrices
 *   prhs[4]  ind  -- d x numSubsref matrix of sampled multi-indices
 *   prhs[5]  vals -- numSubsref scalar weights
 * Output (plhs):
 *   plhs[0]  cell array of d vectors; the i-th has length r[i]*r[i+1]*n[i].
 *
 * NOTE(review): relies on U_SLICE / V_SLICE / RES_SLICE macros defined
 * earlier in the file (not visible in this chunk); presumably they use `ind`
 * to select the slice of each core for sample j -- verify there.
 * NOTE(review): nlhs/nrhs are never validated; malformed calls will crash.
 */
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] ) {
/* input variables */
double* n_raw;
double* r_raw;
double** U;
double** V;
double* ind_raw;
double* vals;
/* output variables */
double** result;
mxArray** result_cells;
/* internal variables */
double* L;              /* per-thread workspace: columns hold left partial products */
double* current;        /* per-thread workspace: running right partial product */
double* tmp;            /* per-thread scratch for dgemv output */
double** result_part;   /* per-thread private copy of the result accumulators */
mwSignedIndex* n;
mwSignedIndex* r;
mwSignedIndex* ind;
mwSignedIndex numSubsref;
mwSignedIndex d;
mwSignedIndex i;
mwSignedIndex j;
mwSignedIndex k;
/* NOTE(review): coresize is never assigned in live code (only in the
 * commented-out block below); it survives only because the omp shared()
 * clause names it. Candidate for removal together with that clause entry. */
mwSignedIndex coresize;
mwSignedIndex maxrank = 1;
/* get sizes */
n_raw = mxGetPr( prhs[0] );
/* get ranks */
r_raw = mxGetPr( prhs[1] );
/* get indices */
ind_raw = mxGetPr( prhs[4] );
d = mxGetM( prhs[4] );
numSubsref = mxGetN( prhs[4] );
vals = mxGetPr( prhs[5] );
n = mxMalloc( d*sizeof(mwSignedIndex) );
r = mxMalloc( (d+1)*sizeof(mwSignedIndex) );
ind = mxMalloc( d*numSubsref*sizeof(mwSignedIndex) );
/* Convert index arrays to integer arrays as they get converted
 * to double arrays when passing to mex.
 * Converting beforehand allows to avoid multiple typecasts inside the inner loop */
for( i = 0; i < d; ++i ) {
n[i] = (mwSignedIndex) n_raw[i];
r[i] = (mwSignedIndex) r_raw[i];
if( r[i] > maxrank )
maxrank = r[i];
}
/* NOTE(review): cast uses mwSize here but mwSignedIndex in the loop above --
 * harmless for non-negative ranks, but inconsistent. */
r[d] = (mwSize) r_raw[d];
for( i = 0; i < numSubsref*d; ++i ) {
ind[i] = (mwSignedIndex) ind_raw[i];
}
/* Get pointers to the matrices within the cell array */
U = mxMalloc( d*sizeof(double*) );
V = mxMalloc( d*sizeof(double*) );
for( i = 0; i < d; ++i ) {
U[i] = mxGetPr( mxGetCell( prhs[2], i ) );
V[i] = mxGetPr( mxGetCell( prhs[3], i ) );
}
/* Allocate space for output: one zero-initialized vector per core */
plhs[0] = mxCreateCellMatrix( 1, d );
result_cells = mxMalloc( d*sizeof(mxArray*) );
result = mxMalloc( d*sizeof(double*) );
for( i=0; i < d; i++){
result_cells[i] = mxCreateDoubleMatrix( r[i]*r[i+1]*n[i], 1, mxREAL);
result[i] = mxGetPr( result_cells[i] );
mxSetCell( plhs[0], i, result_cells[i] );
}
/* helper variables for dgemv call */
char transa = 'T';
char no_transa = 'N';
mwSignedIndex ONE_i = 1;
double ONE_d = 1.0;
double ZERO_d = 0.0;
/*#pragma omp parallel shared(n,r,d,ind,result,coresize,U,V) private(i,j,L,current,tmp,result_part)*/
#pragma omp parallel default(none) \
shared(n,r,d,ind,result,coresize,U,V, numSubsref, maxrank,ONE_i, ZERO_d, ONE_d, transa, vals, no_transa)\
private(i,j,k,L,current,tmp,result_part)
{
/* Allocate enough space for internal intermediate results */
/*L = malloc( maxrank*(d-1)*sizeof(double) );
current = malloc( maxrank*sizeof(double) );
tmp = malloc( maxrank*sizeof(double) );*/
/* NOTE(review): the MATLAB mx* allocators are not documented as
 * thread-safe, hence every allocation/free below is serialized in a
 * critical section. Calling them from worker threads at all may still
 * be unsupported by MathWorks -- confirm, or switch to plain malloc
 * (as in the commented-out code above) for thread-local scratch. */
#pragma omp critical
{
L = mxCalloc( maxrank*(d-1), sizeof(double) );
current = mxCalloc( maxrank, sizeof(double) );
tmp = mxCalloc( maxrank, sizeof(double) );
result_part = mxMalloc( d*sizeof(double*) );
for( i = 0; i < d; ++i )
result_part[i] = mxCalloc( r[i]*r[i+1]*n[i], sizeof(double) );
}
/* Each thread processes a disjoint set of samples j and accumulates
 * into its private result_part; reduction happens after the loop. */
#pragma omp for
for( j = 0; j < numSubsref; ++j ) {
/* LEFT TO RIGHT FIRST (PRECOMPUTE)*/
/* ... copy first core to L: */
dcopy( &r[1], U_SLICE(0,j), &ONE_i, &L[0], &ONE_i );
/* ... and then multiply with the other cores and store results in columns of L: */
for( i = 1; i < d-1; ++i ) {
dgemv( &transa, &r[i], &r[i+1], &ONE_d,
U_SLICE(i,j),
&r[i],
&L[maxrank*(i-1)],
&ONE_i, &ZERO_d, &L[maxrank*i], &ONE_i);
}
/* RIGHT TO LEFT PRODUCTS NOW -- USING PRECOMPUTED LEFT SIDES FROM ABOVE */
/* last dU is without any contributions from the right */
daxpy( &r[d-1], &vals[j], &L[maxrank*(d-2)], &ONE_i, RES_SLICE(d-1,j), &ONE_i );
/* copy rightmost slice to current variable */
dcopy( &r[d-1], V_SLICE(d-1,j), &ONE_i, current, &ONE_i );
/* sweep right-left to form dU{i-1} to dU{1} */
for( i = d-2; i > 0; --i ) {
/* Outer product update:
 * result(:,:,idx) = result(:,:,idx) + L(1:r(i), i-1)*current' */
dger( &r[i], &r[i+1], &vals[j],
&L[maxrank*(i-1)], &ONE_i,
current, &ONE_i,
RES_SLICE(i,j), &r[i] );
/* update current */
dgemv( &no_transa, &r[i], &r[i+1], &ONE_d,
V_SLICE(i,j),
&r[i],
current,
&ONE_i, &ZERO_d, tmp, &ONE_i);
/* ... and copy result back to current */
dcopy( &r[i], tmp, &ONE_i, current, &ONE_i );
}
/* last core */
daxpy( &r[1], &vals[j], current, &ONE_i, RES_SLICE(0,j), &ONE_i );
}
/* Reduction: fold every thread's private partial sums into the shared
 * output. The critical section makes the += updates race-free. */
#pragma omp critical
{
/* gather all local parts into result vector */
for( i = 0; i < d; ++i ){
for( k = 0; k < r[i]*r[i+1]*n[i]; ++k )
result[i][k] += result_part[i][k];
/*daxpy( &coresize, &ONE_d, result_part[i], &ONE_i, result[i], &ONE_i );*/
}
/*for( i = 0; i < d; ++i )
coresize = *r[i]*r[i+1]*n[i];
result[i][0] += result_part[i][0];*/
}
/* Serialized release of thread-local workspace (see allocation note). */
#pragma omp critical
{
mxFree( current );
mxFree( tmp );
mxFree( L );
for( i = 0; i < d; ++i )
mxFree( result_part[i] );
mxFree(result_part);
}
}
mxFree( n );
mxFree( r );
mxFree( ind );
mxFree( U );
mxFree( V );
}
|
GB_binop__isle_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_int16
// A.*B function (eWiseMult): GB_AemultB__isle_int16
// A*D function (colscale): GB_AxD__isle_int16
// D*A function (rowscale): GB_DxB__isle_int16
// C+=B function (dense accum): GB_Cdense_accumB__isle_int16
// C+=b function (dense accum): GB_Cdense_accumb__isle_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int16
// C=scalar+B GB_bind1st__isle_int16
// C=scalar+B' GB_bind1st_tran__isle_int16
// C=A+scalar GB_bind2nd__isle_int16
// C=A'+scalar GB_bind2nd_tran__isle_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT16 || GxB_NO_ISLE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Hard-coded kernel for the ISLE (<=) operator on int16_t.  The loop body
// lives in the included template, which expands the GB_* macros defined
// earlier in this file.  Returns GrB_NO_VALUE when this operator/type pair
// is compiled out (GB_DISABLE), so the caller falls back to a generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__isle_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Hard-coded accumulation kernel for ISLE on int16_t.  The *_slice arrays
// describe how B's entries are partitioned across ntasks parallel tasks
// (presumably produced by GB_ek_slice, included above -- verify there);
// the actual loop is in the included template.
GrB_Info GB_Cdense_accumB__isle_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Hard-coded kernel for the ISLE (<=) operator on int16_t: every entry of the
// dense matrix C is combined with the scalar b via the included template.
// p_bwork points at the scalar, pre-cast to C's type (int16_t) by the caller.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out
// (GB_DISABLE), so the caller falls back to a generic kernel.
//
// Fix: the original had a second, unreachable "return (GrB_SUCCESS) ;" after
// the inner block (the inner return always executes first); the dead
// statement has been removed.
GrB_Info GB_Cdense_accumb__isle_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each column j of A is scaled by D(j,j) using the ISLE (<=) operator on
// int16_t; the loop lives in the included meta file.  The *_is_pattern flags
// and slice arrays are consumed by that template (not inspected here).
GrB_Info GB_AxD__isle_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// expose C's numeric array to the template under the expected name Cx
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each row i of B is scaled by D(i,i) using the ISLE (<=) operator on
// int16_t; the loop lives in the included meta file.
GrB_Info GB_DxB__isle_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// expose C's numeric array to the template under the expected name Cx
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union element-wise apply of the ISLE (<=) operator on int16_t, with an
// optional mask M (Mask_struct: treat M as structural).  The C_to_* arrays
// map C's vectors to those of M, A, and B, and TaskList partitions the work
// across ntasks -- all consumed by the included template (not inspected here).
GrB_Info GB_AaddB__isle_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection element-wise apply of the ISLE (<=) operator on int16_t,
// with an optional mask M.  Same parameter conventions as GB_AaddB above;
// the loop lives in the included template.
GrB_Info GB_AemultB__isle_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry k: Cx [k] = (x <= Bx [k]), where x is a scalar bound as the
// first operand.  anz is the number of entries; the loop is embarrassingly
// parallel since each Cx [k] is written exactly once.
GrB_Info GB_bind1st__isle_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// the bound scalar, first operand of (x <= bij)
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// boolean result of the comparison, stored as int16_t 0 or 1
Cx [k] = (x <= Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry k: Cx [k] = (Ax [k] <= y), where y is a scalar bound as the
// second operand.  anz is the number of entries; each Cx [k] is written
// exactly once, so the loop parallelizes trivially.
GrB_Info GB_bind2nd__isle_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
// the bound scalar, second operand of (aij <= y)
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// boolean result of the comparison, stored as int16_t 0 or 1
Cx [k] = (Ax [k] <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// Transposes A while applying (x <= aij) with x bound as the first operand.
// The transpose machinery (Rowcounts, Iter, A_slice, naslice) is consumed by
// the included GB_unop_transpose.c template -- not inspected here.
GrB_Info GB_bind1st_tran__isle_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// Transposes A while applying (aij <= y) with y bound as the second operand.
// Same transpose machinery as GB_bind1st_tran above, via the included
// GB_unop_transpose.c template.
GrB_Info GB_bind2nd_tran__isle_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type pair compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.