{"repo_name": "pogocache", "file_name": "/pogocache/src/hashmap.c", "inference_info": {"prefix_code": "// Copyright 2020 Joshua J Baker. All rights reserved.\n// Use of this source code is governed by an MIT-style\n// license that can be found in the LICENSE file.\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\n#define GROW_AT 0.60 /* 60% */\n#define SHRINK_AT 0.10 /* 10% */\n\n#ifndef HASHMAP_LOAD_FACTOR\n#define HASHMAP_LOAD_FACTOR GROW_AT\n#endif\n\nstatic void *(*__malloc)(size_t) = NULL;\nstatic void *(*__realloc)(void *, size_t) = NULL;\nstatic void (*__free)(void *) = NULL;\n\n// hashmap_set_allocator allows for configuring a custom allocator for\n// all hashmap library operations. This function, if needed, should be called\n// only once at startup and a prior to calling hashmap_new().\nvoid hashmap_set_allocator(void *(*malloc)(size_t), void (*free)(void*)) {\n __malloc = malloc;\n __free = free;\n}\n\nstruct bucket {\n uint64_t hash:48;\n uint64_t dib:16;\n};\n\n// hashmap is an open addressed hash map using robinhood hashing.\nstruct hashmap {\n void *(*malloc)(size_t);\n void *(*realloc)(void *, size_t);\n void (*free)(void *);\n size_t elsize;\n size_t cap;\n uint64_t seed0;\n uint64_t seed1;\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1);\n int (*compare)(const void *a, const void *b, void *udata);\n void (*elfree)(void *item);\n void *udata;\n size_t bucketsz;\n size_t nbuckets;\n size_t count;\n size_t mask;\n size_t growat;\n size_t shrinkat;\n uint8_t loadfactor;\n uint8_t growpower;\n bool oom;\n void *buckets;\n void *spare;\n void *edata;\n};\n\nvoid hashmap_set_grow_by_power(struct hashmap *map, size_t power) {\n map->growpower = power < 1 ? 1 : power > 16 ? 16 : power;\n}\n\nstatic double clamp_load_factor(double factor, double default_factor) {\n // Check for NaN and clamp between 50% and 90%\n return factor != factor ? default_factor : \n factor < 0.50 ? 0.50 : \n factor > 0.95 ? 
0.95 : \n factor;\n}\n\nvoid hashmap_set_load_factor(struct hashmap *map, double factor) {\n factor = clamp_load_factor(factor, map->loadfactor / 100.0);\n map->loadfactor = factor * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n}\n\nstatic struct bucket *bucket_at0(void *buckets, size_t bucketsz, size_t i) {\n return (struct bucket*)(((char*)buckets)+(bucketsz*i));\n}\n\nstatic struct bucket *bucket_at(struct hashmap *map, size_t index) {\n return bucket_at0(map->buckets, map->bucketsz, index);\n}\n\nstatic void *bucket_item(struct bucket *entry) {\n return ((char*)entry)+sizeof(struct bucket);\n}\n\nstatic uint64_t clip_hash(uint64_t hash) {\n return hash & 0xFFFFFFFFFFFF;\n}\n\nstatic uint64_t get_hash(struct hashmap *map, const void *key) {\n return clip_hash(map->hash(key, map->seed0, map->seed1));\n}\n\n\n// hashmap_new_with_allocator returns a new hash map using a custom allocator.\n// See hashmap_new for more information information\nstruct hashmap *hashmap_new_with_allocator(void *(*_malloc)(size_t), \n void *(*_realloc)(void*, size_t), void (*_free)(void*),\n size_t elsize, size_t cap, uint64_t seed0, uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n _malloc = _malloc ? _malloc : __malloc ? __malloc : malloc;\n _realloc = _realloc ? _realloc : __realloc ? __realloc : realloc;\n _free = _free ? _free : __free ? 
__free : free;\n size_t ncap = 16;\n if (cap < ncap) {\n cap = ncap;\n } else {\n while (ncap < cap) {\n ncap *= 2;\n }\n cap = ncap;\n }\n size_t bucketsz = sizeof(struct bucket) + elsize;\n while (bucketsz & (sizeof(uintptr_t)-1)) {\n bucketsz++;\n }\n // hashmap + spare + edata\n size_t size = sizeof(struct hashmap)+bucketsz*2;\n struct hashmap *map = _malloc(size);\n if (!map) {\n return NULL;\n }\n memset(map, 0, sizeof(struct hashmap));\n map->elsize = elsize;\n map->bucketsz = bucketsz;\n map->seed0 = seed0;\n map->seed1 = seed1;\n map->hash = hash;\n map->compare = compare;\n map->elfree = elfree;\n map->udata = udata;\n map->spare = ((char*)map)+sizeof(struct hashmap);\n map->edata = (char*)map->spare+bucketsz;\n map->cap = cap;\n map->nbuckets = cap;\n map->mask = map->nbuckets-1;\n map->buckets = _malloc(map->bucketsz*map->nbuckets);\n if (!map->buckets) {\n _free(map);\n return NULL;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->growpower = 1;\n map->loadfactor = clamp_load_factor(HASHMAP_LOAD_FACTOR, GROW_AT) * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n map->shrinkat = map->nbuckets * SHRINK_AT;\n map->malloc = _malloc;\n map->realloc = _realloc;\n map->free = _free;\n return map; \n}\n\n// hashmap_new returns a new hash map. \n// Param `elsize` is the size of each element in the tree. Every element that\n// is inserted, deleted, or retrieved will be this size.\n// Param `cap` is the default lower capacity of the hashmap. Setting this to\n// zero will default to 16.\n// Params `seed0` and `seed1` are optional seed values that are passed to the \n// following `hash` function. These can be any value you wish but it's often \n// best to use randomly generated values.\n// Param `hash` is a function that generates a hash value for an item. It's\n// important that you provide a good hash function, otherwise it will perform\n// poorly or be vulnerable to Denial-of-service attacks. 
This implementation\n// comes with two helper functions `hashmap_sip()` and `hashmap_murmur()`.\n// Param `compare` is a function that compares items in the tree. See the \n// qsort stdlib function for an example of how this function works.\n// The hashmap must be freed with hashmap_free(). \n// Param `elfree` is a function that frees a specific item. This should be NULL\n// unless you're storing some kind of reference data in the hash.\nstruct hashmap *hashmap_new(size_t elsize, size_t cap, uint64_t seed0, \n uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n return hashmap_new_with_allocator(NULL, NULL, NULL, elsize, cap, seed0, \n seed1, hash, compare, elfree, udata);\n}\n\nstatic void free_elements(struct hashmap *map) {\n if (map->elfree) {\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib) map->elfree(bucket_item(bucket));\n }\n }\n}\n\n// hashmap_clear quickly clears the map. \n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\n// When the update_cap is provided, the map's capacity will be updated to match\n// the currently number of allocated buckets. 
This is an optimization to ensure\n// that this operation does not perform any allocations.\nvoid hashmap_clear(struct hashmap *map, bool update_cap) {\n map->count = 0;\n free_elements(map);\n if (update_cap) {\n map->cap = map->nbuckets;\n } else if (map->nbuckets != map->cap) {\n void *new_buckets = map->malloc(map->bucketsz*map->cap);\n if (new_buckets) {\n map->free(map->buckets);\n map->buckets = new_buckets;\n }\n map->nbuckets = map->cap;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * (map->loadfactor / 100.0) ;\n map->shrinkat = map->nbuckets * SHRINK_AT;\n}\n\nstatic bool resize0(struct hashmap *map, size_t new_cap) {\n struct hashmap *map2 = hashmap_new_with_allocator(map->malloc, map->realloc, \n map->free, map->elsize, new_cap, map->seed0, map->seed1, map->hash, \n map->compare, map->elfree, map->udata);\n if (!map2) return false;\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *entry = bucket_at(map, i);\n if (!entry->dib) {\n continue;\n }\n entry->dib = 1;\n size_t j = entry->hash & map2->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map2, j);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n break;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map2->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map2->spare, map->bucketsz);\n }\n j = (j + 1) & map2->mask;\n entry->dib += 1;\n }\n }\n map->free(map->buckets);\n map->buckets = map2->buckets;\n map->nbuckets = map2->nbuckets;\n map->mask = map2->mask;\n map->growat = map2->growat;\n map->shrinkat = map2->shrinkat;\n map->free(map2);\n return true;\n}\n\nstatic bool resize(struct hashmap *map, size_t new_cap) {\n return resize0(map, new_cap);\n}\n\n// hashmap_set_with_hash works like hashmap_set but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_set_with_hash(struct hashmap *map, const void *item,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*(1<growpower))) {\n map->oom = true;\n return NULL;\n }\n }\n\n struct bucket *entry = map->edata;\n entry->hash = hash;\n entry->dib = 1;\n void *eitem = bucket_item(entry);\n memcpy(eitem, item, map->elsize);\n\n void *bitem;\n size_t i = entry->hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n map->count++;\n return NULL;\n }\n bitem = bucket_item(bucket);\n if (entry->hash == bucket->hash && (!map->compare ||\n map->compare(eitem, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n memcpy(bitem, eitem, map->elsize);\n return map->spare;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map->spare, map->bucketsz);\n eitem = bucket_item(entry);\n }\n i = (i + 1) & map->mask;\n entry->dib += 1;\n }\n}\n\n// hashmap_set inserts or replaces an item in the hash map. If an item is\n// replaced then it is returned otherwise NULL is returned. This operation\n// may allocate memory. If the system is unable to allocate additional\n// memory then NULL is returned and hashmap_oom() returns true.\nconst void *hashmap_set(struct hashmap *map, const void *item) {\n return hashmap_set_with_hash(map, item, get_hash(map, item));\n}\n\n// hashmap_get_with_hash works like hashmap_get but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_get_with_hash(struct hashmap *map, const void *key, \n uint64_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) return NULL;\n if (bucket->hash == hash) {\n void *bitem = bucket_item(bucket);\n if (!map->compare || map->compare(key, bitem, map->udata) == 0) {\n return bitem;\n }\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_get returns the item based on the provided key. If the item is not\n// found then NULL is returned.\nconst void *hashmap_get(struct hashmap *map, const void *key) {\n return hashmap_get_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_probe returns the item in the bucket at position or NULL if an item\n// is not set for that bucket. The position is 'moduloed' by the number of \n// buckets in the hashmap.\nconst void *hashmap_probe(struct hashmap *map, uint64_t position) {\n size_t i = position & map->mask;\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n return bucket_item(bucket);\n}\n\n// hashmap_delete_with_hash works like hashmap_delete but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_delete_with_hash(struct hashmap *map, const void *key,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n void *bitem = bucket_item(bucket);\n if (bucket->hash == hash && (!map->compare ||\n map->compare(key, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n bucket->dib = 0;\n while(1) {\n struct bucket *prev = bucket;\n i = (i + 1) & map->mask;\n bucket = bucket_at(map, i);\n if (bucket->dib <= 1) {\n prev->dib = 0;\n break;\n }\n memcpy(prev, bucket, map->bucketsz);\n prev->dib--;\n }\n map->count--;\n if (map->nbuckets > map->cap && map->count <= map->shrinkat) {\n // Ignore the return value. It's ok for the resize operation to\n // fail to allocate enough memory because a shrink operation\n // does not change the integrity of the data.\n resize(map, map->nbuckets/2);\n }\n return map->spare;\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_delete removes an item from the hash map and returns it. 
If the\n// item is not found then NULL is returned.\nconst void *hashmap_delete(struct hashmap *map, const void *key) {\n return hashmap_delete_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_count returns the number of items in the hash map.\nsize_t hashmap_count(struct hashmap *map) {\n return map->count;\n}\n\n// hashmap_free frees the hash map\n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\nvoid hashmap_free(struct hashmap *map) {\n if (!map) return;\n free_elements(map);\n map->free(map->buckets);\n map->free(map);\n}\n\n// hashmap_oom returns true if the last hashmap_set() call failed due to the \n// system being out of memory.\nbool hashmap_oom(struct hashmap *map) {\n return map->oom;\n}\n\n// hashmap_scan iterates over all items in the hash map\n// Param `iter` can return false to stop iteration early.\n// Returns false if the iteration has been stopped early.\nbool hashmap_scan(struct hashmap *map, \n bool (*iter)(const void *item, void *udata), void *udata)\n{\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib && !iter(bucket_item(bucket), udata)) {\n return false;\n }\n }\n return true;\n}\n\n// hashmap_iter iterates one key at a time yielding a reference to an\n// entry at each iteration. Useful to write simple loops and avoid writing\n// dedicated callbacks and udata structures, as in hashmap_scan.\n//\n// map is a hash map handle. i is a pointer to a size_t cursor that\n// should be initialized to 0 at the beginning of the loop. item is a void\n// pointer pointer that is populated with the retrieved item. 
Note that this\n// is NOT a copy of the item stored in the hash map and can be directly\n// modified.\n//\n// Note that if hashmap_delete() is called on the hashmap being iterated,\n// the buckets are rearranged and the iterator must be reset to 0, otherwise\n// unexpected results may be returned after deletion.\n//\n// This function has not been tested for thread safety.\n//\n// The function returns true if an item was retrieved; false if the end of the\n// iteration has been reached.\nbool hashmap_iter(struct hashmap *map, size_t *i, void **item) {\n struct bucket *bucket;\n do {\n if (*i >= map->nbuckets) return false;\n bucket = bucket_at(map, *i);\n (*i)++;\n } while (!bucket->dib);\n *item = bucket_item(bucket);\n return true;\n}\n\n\n//-----------------------------------------------------------------------------\n// SipHash reference C implementation\n//\n// Copyright (c) 2012-2016 Jean-Philippe Aumasson\n// \n// Copyright (c) 2012-2014 Daniel J. Bernstein \n//\n// To the extent possible under law, the author(s) have dedicated all copyright\n// and related and neighboring rights to this software to the public domain\n// worldwide. This software is distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along\n// with this software. 
If not, see\n// .\n//\n// default: SipHash-2-4\n//-----------------------------------------------------------------------------\nstatic uint64_t SIP64(const uint8_t *in, const size_t inlen, uint64_t seed0,\n uint64_t seed1) \n{\n#define U8TO64_LE(p) \\\n { (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \\\n ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \\\n ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \\\n ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) }\n#define U64TO8_LE(p, v) \\\n { U32TO8_LE((p), (uint32_t)((v))); \\\n U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); }\n#define U32TO8_LE(p, v) \\\n { (p)[0] = (uint8_t)((v)); \\\n (p)[1] = (uint8_t)((v) >> 8); \\\n (p)[2] = (uint8_t)((v) >> 16); \\\n (p)[3] = (uint8_t)((v) >> 24); }\n#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))\n#define SIPROUND \\\n { v0 += v1; v1 = ROTL(v1, 13); \\\n v1 ^= v0; v0 = ROTL(v0, 32); \\\n v2 += v3; v3 = ROTL(v3, 16); \\\n v3 ^= v2; \\\n v0 += v3; v3 = ROTL(v3, 21); \\\n v3 ^= v0; \\\n v2 += v1; v1 = ROTL(v1, 17); \\\n v1 ^= v2; v2 = ROTL(v2, 32); }\n uint64_t k0 = U8TO64_LE((uint8_t*)&seed0);\n uint64_t k1 = U8TO64_LE((uint8_t*)&seed1);\n uint64_t v3 = UINT64_C(0x7465646279746573) ^ k1;\n uint64_t v2 = UINT64_C(0x6c7967656e657261) ^ k0;\n uint64_t v1 = UINT64_C(0x646f72616e646f6d) ^ k1;\n uint64_t v0 = UINT64_C(0x736f6d6570736575) ^ k0;\n const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));\n for (; in != end; in += 8) {\n uint64_t m = U8TO64_LE(in);\n v3 ^= m;\n SIPROUND; SIPROUND;\n v0 ^= m;\n }\n const int left = inlen & 7;\n uint64_t b = ((uint64_t)inlen) << 56;\n switch (left) {\n case 7: b |= ((uint64_t)in[6]) << 48; /* fall through */\n case 6: b |= ((uint64_t)in[5]) << 40; /* fall through */\n case 5: b |= ((uint64_t)in[4]) << 32; /* fall through */\n case 4: b |= ((uint64_t)in[3]) << 24; /* fall through */\n case 3: b |= ((uint64_t)in[2]) << 16; /* fall through */\n case 2: b |= ((uint64_t)in[1]) << 8; /* fall 
through */\n case 1: b |= ((uint64_t)in[0]); break;\n case 0: break;\n }\n v3 ^= b;\n SIPROUND; SIPROUND;\n v0 ^= b;\n v2 ^= 0xff;\n SIPROUND; SIPROUND; SIPROUND; SIPROUND;\n b = v0 ^ v1 ^ v2 ^ v3;\n uint64_t out = 0;\n U64TO8_LE((uint8_t*)&out, b);\n return out;\n}\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n//\n// Murmur3_86_128\n//-----------------------------------------------------------------------------\nstatic uint64_t MM86128(const void *key, const int len, uint32_t seed) {\n#define\tROTL32(x, r) ((x << r) | (x >> (32 - r)))\n#define FMIX32(h) h^=h>>16; h*=0x85ebca6b; h^=h>>13; h*=0xc2b2ae35; h^=h>>16;\n const uint8_t * data = (const uint8_t*)key;\n const int nblocks = len / 16;\n uint32_t h1 = seed;\n uint32_t h2 = seed;\n uint32_t h3 = seed;\n uint32_t h4 = seed;\n uint32_t c1 = 0x239b961b; \n uint32_t c2 = 0xab0e9789;\n uint32_t c3 = 0x38b34ae5; \n uint32_t c4 = 0xa1e38b93;\n const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n for (int i = -nblocks; i; i++) {\n uint32_t k1 = blocks[i*4+0];\n uint32_t k2 = blocks[i*4+1];\n uint32_t k3 = blocks[i*4+2];\n uint32_t k4 = blocks[i*4+3];\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n }\n const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n uint32_t k1 = 0;\n uint32_t k2 = 0;\n uint32_t k3 = 0;\n uint32_t k4 = 0;\n switch(len & 15) {\n case 15: k4 ^= tail[14] << 16; /* fall through */\n case 14: k4 ^= tail[13] << 8; /* fall through 
*/\n case 13: k4 ^= tail[12] << 0;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n /* fall through */\n case 12: k3 ^= tail[11] << 24; /* fall through */\n case 11: k3 ^= tail[10] << 16; /* fall through */\n case 10: k3 ^= tail[ 9] << 8; /* fall through */\n case 9: k3 ^= tail[ 8] << 0;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n /* fall through */\n case 8: k2 ^= tail[ 7] << 24; /* fall through */\n case 7: k2 ^= tail[ 6] << 16; /* fall through */\n case 6: k2 ^= tail[ 5] << 8; /* fall through */\n case 5: k2 ^= tail[ 4] << 0;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n /* fall through */\n case 4: k1 ^= tail[ 3] << 24; /* fall through */\n case 3: k1 ^= tail[ 2] << 16; /* fall through */\n case 2: k1 ^= tail[ 1] << 8; /* fall through */\n case 1: k1 ^= tail[ 0] << 0;\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n /* fall through */\n };\n h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n FMIX32(h1); FMIX32(h2); FMIX32(h3); FMIX32(h4);\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n return (((uint64_t)h2)<<32)|h1;\n}\n\n//-----------------------------------------------------------------------------\n// xxHash Library\n// Copyright (c) 2012-2021 Yann Collet\n// All rights reserved.\n// \n// BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)\n//\n// xxHash3\n//-----------------------------------------------------------------------------\n#define XXH_PRIME_1 11400714785074694791ULL\n#define XXH_PRIME_2 14029467366897019727ULL\n#define XXH_PRIME_3 1609587929392839161ULL\n#define XXH_PRIME_4 9650029242287828579ULL\n#define XXH_PRIME_5 2870177450012600261ULL\n\nstatic uint64_t XXH_read64(const void* memptr) {\n uint64_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint32_t XXH_read32(const void* memptr) {\n uint32_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint64_t XXH_rotl64(uint64_t 
x, int r) {\n return (x << r) | (x >> (64 - r));\n}\n\nstatic uint64_t xxh3(const void* data, size_t len, uint64_t seed) {\n const uint8_t* p = (const uint8_t*)data;\n const uint8_t* const end = p + len;\n uint64_t h64;\n\n if (len >= 32) {\n const uint8_t* const limit = end - 32;\n uint64_t v1 = seed + XXH_PRIME_1 + XXH_PRIME_2;\n uint64_t v2 = seed + XXH_PRIME_2;\n uint64_t v3 = seed + 0;\n uint64_t v4 = seed - XXH_PRIME_1;\n\n do {\n v1 += XXH_read64(p) * XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n\n v2 += XXH_read64(p + 8) * XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n\n v3 += XXH_read64(p + 16) * XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n\n v4 += XXH_read64(p + 24) * XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n\n p += 32;\n } while (p <= limit);\n\n h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + \n XXH_rotl64(v4, 18);\n\n v1 *= XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n h64 ^= v1;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v2 *= XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n h64 ^= v2;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v3 *= XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n h64 ^= v3;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v4 *= XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n h64 ^= v4;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n }\n else {\n h64 = seed + XXH_PRIME_5;\n }\n\n h64 += (uint64_t)len;\n\n while (p + 8 <= end) {\n uint64_t k1 = XXH_read64(p);\n k1 *= XXH_PRIME_2;\n k1 = XXH_rotl64(k1, 31);\n k1 *= XXH_PRIME_1;\n h64 ^= k1;\n h64 = XXH_rotl64(h64, 27) * XXH_PRIME_1 + XXH_PRIME_4;\n p += 8;\n }\n\n if (p + 4 <= end) {\n h64 ^= (uint64_t)(XXH_read32(p)) * XXH_PRIME_1;\n h64 = XXH_rotl64(h64, 23) * XXH_PRIME_2 + XXH_PRIME_3;\n p += 4;\n }\n\n while (p < end) {\n h64 ^= (*p) * XXH_PRIME_5;\n h64 = XXH_rotl64(h64, 11) * XXH_PRIME_1;\n p++;\n }\n\n h64 ^= h64 >> 33;\n h64 *= 
XXH_PRIME_2;\n h64 ^= h64 >> 29;\n h64 *= XXH_PRIME_3;\n h64 ^= h64 >> 32;\n\n return h64;\n}\n\n// hashmap_sip returns a hash value for `data` using SipHash-2-4.\nuint64_t hashmap_sip(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n return SIP64((uint8_t*)data, len, seed0, seed1);\n}\n\n// hashmap_murmur returns a hash value for `data` using Murmur3_86_128.\nuint64_t hashmap_murmur(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return MM86128(data, len, seed0);\n}\n\nuint64_t hashmap_xxhash3(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return xxh3(data, len ,seed0);\n}\n\n//==============================================================================\n// TESTS AND BENCHMARKS\n// $ cc -DHASHMAP_TEST hashmap.c && ./a.out # run tests\n// $ cc -DHASHMAP_TEST -O3 hashmap.c && BENCH=1 ./a.out # run benchmarks\n//==============================================================================\n#ifdef HASHMAP_TEST\n\nstatic size_t deepcount(struct hashmap *map) {\n size_t count = 0;\n for (size_t i = 0; i < map->nbuckets; i++) {\n if (bucket_at(map, i)->dib) {\n count++;\n }\n }\n return count;\n}\n\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wpedantic\"\n#endif\n#ifdef __clang__\n#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n#pragma GCC diagnostic ignored \"-Wcompound-token-split-by-macro\"\n#pragma GCC diagnostic ignored \"-Wgnu-statement-expression-from-macro-expansion\"\n#endif\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\nstatic bool rand_alloc_fail = false;\nstatic int rand_alloc_fail_odds = 3; // 1 in 3 chance malloc will fail.\nstatic uintptr_t total_allocs = 0;\nstatic uintptr_t total_mem = 0;\n\nstatic void *xmalloc(size_t size) {\n if (rand_alloc_fail && rand()%rand_alloc_fail_odds == 0) {\n return NULL;\n }\n void *mem = 
malloc(sizeof(uintptr_t)+size);\n assert(mem);\n *(uintptr_t*)mem = size;\n total_allocs++;\n total_mem += size;\n return (char*)mem+sizeof(uintptr_t);\n}\n\nstatic void xfree(void *ptr) {\n if (ptr) {\n total_mem -= *(uintptr_t*)((char*)ptr-sizeof(uintptr_t));\n free((char*)ptr-sizeof(uintptr_t));\n total_allocs--;\n }\n}\n\nstatic void shuffle(void *array, size_t numels, size_t elsize) {\n char tmp[elsize];\n char *arr = array;\n for (size_t i = 0; i < numels - 1; i++) {\n int j = i + rand() / (RAND_MAX / (numels - i) + 1);\n memcpy(tmp, arr + j * elsize, elsize);\n memcpy(arr + j * elsize, arr + i * elsize, elsize);\n memcpy(arr + i * elsize, tmp, elsize);\n }\n}\n\nstatic bool iter_ints(const void *item, void *udata) {\n int *vals = *(int**)udata;\n vals[*(int*)item] = 1;\n return true;\n}\n\nstatic int compare_ints_udata(const void *a, const void *b, void *udata) {\n return *(int*)a - *(int*)b;\n}\n\nstatic int compare_strs(const void *a, const void *b, void *udata) {\n return strcmp(*(char**)a, *(char**)b);\n}\n\nstatic uint64_t hash_int(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(item, sizeof(int), seed0, seed1);\n // return hashmap_sip(item, sizeof(int), seed0, seed1);\n // return hashmap_murmur(item, sizeof(int), seed0, seed1);\n}\n\nstatic uint64_t hash_str(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_sip(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_murmur(*(char**)item, strlen(*(char**)item), seed0, seed1);\n}\n\nstatic void free_str(void *item) {\n xfree(*(char**)item);\n}\n\nstatic void all(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):2000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n rand_alloc_fail = true;\n\n // test sip and murmur hashes\n assert(hashmap_sip(\"hello\", 
5, 1, 2) == 2957200328589801622);\n assert(hashmap_murmur(\"hello\", 5, 1, 2) == 1682575153221130884);\n assert(hashmap_xxhash3(\"hello\", 5, 1, 2) == 2584346877953614258);\n\n int *vals;\n while (!(vals = xmalloc(N * sizeof(int)))) {}\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n struct hashmap *map;\n\n while (!(map = hashmap_new(sizeof(int), 0, seed, seed, \n hash_int, compare_ints_udata, NULL, NULL))) {}\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n // // printf(\"== %d ==\\n\", vals[i]);\n assert(map->count == (size_t)i);\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n const int *v;\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n \n for (int j = 0; j < i; j++) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n while (true) {\n v = hashmap_set(map, &vals[i]);\n if (!v) {\n assert(hashmap_oom(map));\n continue;\n } else {\n assert(!hashmap_oom(map));\n assert(v && *v == vals[i]);\n break;\n }\n }\n v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n assert(!hashmap_set(map, &vals[i]));\n assert(map->count == (size_t)(i+1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n }\n\n int *vals2;\n while (!(vals2 = xmalloc(N * sizeof(int)))) {}\n memset(vals2, 0, N * sizeof(int));\n assert(hashmap_scan(map, iter_ints, &vals2));\n\n // Test hashmap_iter. 
This does the same as hashmap_scan above.\n size_t iter = 0;\n void *iter_val;\n while (hashmap_iter (map, &iter, &iter_val)) {\n assert (iter_ints(iter_val, &vals2));\n }\n for (int i = 0; i < N; i++) {\n assert(vals2[i] == 1);\n }\n xfree(vals2);\n\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n const int *v;\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(map->count == (size_t)(N-i-1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n for (int j = N-1; j > i; j--) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n }\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n assert(map->count != 0);\n size_t prev_cap = map->cap;\n hashmap_clear(map, true);\n assert(prev_cap < map->cap);\n assert(map->count == 0);\n\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n prev_cap = map->cap;\n hashmap_clear(map, false);\n assert(prev_cap == map->cap);\n\n hashmap_free(map);\n\n xfree(vals);\n\n\n while (!(map = hashmap_new(sizeof(char*), 0, seed, seed,\n hash_str, compare_strs, free_str, NULL)));\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_clear(map, false);\n assert(hashmap_count(map) == 0);\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_free(map);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n#define bench(name, N, code) {{ \\\n if (strlen(name) > 0) { \\\n printf(\"%-14s \", name); \\\n } \\\n size_t tmem = total_mem; \\\n size_t tallocs = 
total_allocs; \\\n uint64_t bytes = 0; \\\n clock_t begin = clock(); \\\n for (int i = 0; i < N; i++) { \\\n (code); \\\n } \\\n clock_t end = clock(); \\\n double elapsed_secs = (double)(end - begin) / CLOCKS_PER_SEC; \\\n double bytes_sec = (double)bytes/elapsed_secs; \\\n printf(\"%d ops in %.3f secs, %.0f ns/op, %.0f op/sec\", \\\n N, elapsed_secs, \\\n elapsed_secs/(double)N*1e9, \\\n (double)N/elapsed_secs \\\n ); \\\n if (bytes > 0) { \\\n printf(\", %.1f GB/sec\", bytes_sec/1024/1024/1024); \\\n } \\\n if (total_mem > tmem) { \\\n size_t used_mem = total_mem-tmem; \\\n printf(\", %.2f bytes/op\", (double)used_mem/N); \\\n } \\\n if (total_allocs > tallocs) { \\\n size_t used_allocs = total_allocs-tallocs; \\\n printf(\", %.2f allocs/op\", (double)used_allocs/N); \\\n } \\\n printf(\"\\n\"); \\\n}}\n\nstatic void benchmarks(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):5000000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n\n int *vals = xmalloc(N * sizeof(int));\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n shuffle(vals, N, sizeof(int));\n\n struct hashmap *map;\n shuffle(vals, N, sizeof(int));\n\n map = hashmap_new(sizeof(int), 0, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get\", N, {\n const int *v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete\", N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n hashmap_free(map);\n\n map = hashmap_new(sizeof(int), N, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set (cap)\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get (cap)\", N, {\n const int *v = 
hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete (cap)\" , N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n\n hashmap_free(map);\n\n \n xfree(vals);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n", "suffix_code": "\n\n\n#endif\n\n\n", "middle_code": "int main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "c", "sub_task_type": null}, "context_code": [["/pogocache/src/pogocache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit pogocache.c is the primary caching engine library, which is designed\n// to be standalone and embeddable.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"pogocache.h\"\n\n#define MINLOADFACTOR_RH 55 // 55%\n#define MAXLOADFACTOR_RH 95 // 95%\n#define DEFLOADFACTOR 75 // 75%\n#define SHRINKAT 10 // 10%\n#define DEFSHARDS 4096 // default number of shards\n#define INITCAP 64 // intial number of buckets per shard\n\n// #define DBGCHECKENTRY\n// #define EVICTONITER\n// #define HALFSECONDTIME\n// #define NO48BITPTRS\n\n#if INTPTR_MAX == INT64_MAX\n#ifdef NO48BITPTRS\n#define PTRSIZE 8\n#else\n#define PTRSIZE 6\n#endif\n#elif INTPTR_MAX == INT32_MAX\n#define PTRSIZE 4\n#else\n#error Unknown 
pointer size\n#endif\n\nstatic struct pogocache_count_opts defcountopts = { 0 };\nstatic struct pogocache_total_opts deftotalopts = { 0 };\nstatic struct pogocache_size_opts defsizeopts = { 0 };\nstatic struct pogocache_sweep_opts defsweepopts = { 0 };\nstatic struct pogocache_clear_opts defclearopts = { 0 };\nstatic struct pogocache_store_opts defstoreopts = { 0 };\nstatic struct pogocache_load_opts defloadopts = { 0 };\nstatic struct pogocache_delete_opts defdeleteopts = { 0 };\nstatic struct pogocache_iter_opts defiteropts = { 0 };\nstatic struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// returns monotonic nanoseconds of the CPU clock.\nstatic int64_t gettime(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// returns offset of system clock since first call in thread.\nstatic int64_t getnow(void) {\n return gettime();\n}\n\n// https://github.com/tidwall/th64\nstatic uint64_t th64(const void *data, size_t len, uint64_t seed) {\n uint8_t*p=(uint8_t*)data,*e=p+len;\n uint64_t r=0x14020a57acced8b7,x,h=seed;\n while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;\n while(p>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);\n}\n\n// Load a pointer from an unaligned memory.\nstatic void *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n 
uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\n// Store a pointer into unaligned memory.\nstatic void store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nstatic uint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\n// Sixpack compression algorithm\n// - Converts a simple 8-bit string into 6-bit string.\n// - Intended to be used on small strings that only use characters commonly\n// used for keys in KV data stores.\n// - Allows the following 64 item character set:\n// -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy\n// Note that the characters \"QZz\" are not included.\n// - Sortable and comparable using memcmp.\nstatic char tosix[256] = {\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0-15\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, // 32-47\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, // 48-63\n 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79\n 29, 0, 30, 31, 32, 33, 34, 35, 36, 37, 0, 0, 0, 0, 0, 38, // 80-95\n 0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111\n 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 112-127\n};\n\nstatic char fromsix[] = {\n 0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',\n 'A', 'B', 'C', 'D', 'E', 'F', 
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\n 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',\n 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'\n};\n\n// 0: [000000..] bitpos: 0\n// 1: [00000011][1111....] bitpos: 6\n// 2: [00000011][11112222][22......] bitpos: 12 \n// 3: [00000011][11112222][22333333] bitpos: 18\n\n// Sixpack data\n// Fills the data in dst and returns the number of bytes filled.\n// Returns 0 if not a sixpackable.\n// The dst array must be large enough to hold packed value\nstatic int sixpack(const char *data, int len, char dst[]){\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n for (int i = 0; i < len; i++) {\n int k6v = tosix[bytes[i]];\n if (k6v == 0) {\n return 0;\n }\n if (i%4 == 0) {\n dst[j++] = k6v<<2;\n } else if (i%4 == 1) {\n dst[j-1] |= k6v>>4;\n dst[j++] = k6v<<4;\n } else if (i%4 == 2) {\n dst[j-1] |= k6v>>2;\n dst[j++] = k6v<<6;\n } else {\n dst[j-1] |= k6v;\n }\n }\n return j;\n}\n\n// (Un)sixpack data.\n// Fills the data in dst and returns the len of original data.\n// The data must be sixpacked and len must be > 0.\n// The dst array must be large enough to hold unpacked value\nstatic int unsixpack(const char *data, int len, char dst[]) {\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n int k = 0;\n for (int i = 0; i < len; i++) {\n if (k == 0) {\n dst[j++] = fromsix[bytes[i]>>2];\n k++;\n } else if (k == 1) {\n dst[j++] = fromsix[((bytes[i-1]<<4)|(bytes[i]>>4))&63];\n k++;\n } else {\n dst[j++] = fromsix[((bytes[i-1]<<2)|(bytes[i]>>6))&63];\n dst[j++] = fromsix[bytes[i]&63];\n k = 0;\n }\n }\n if (j > 0 && dst[j-1] == 0) {\n j--;\n }\n return j;\n}\n\n// Safely adds two int64_t values, clamping on overflow.\nstatic int64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - 
a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n/// https://github.com/tidwall/varint.c\nstatic int varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nstatic int varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\n#ifdef HALFSECONDTIME\ntypedef uint32_t etime_t;\n#else\ntypedef int64_t etime_t;\n#endif\n\n\n// Mostly a copy of the pogocache_opts, but used internally\n// See the opts_to_ctx function for translation.\nstruct pgctx {\n void *(*malloc)(size_t);\n void (*free)(void*);\n size_t (*malloc_size)(void*);\n void (*yield)(void *udata);\n void (*evicted)(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata);\n void *udata;\n bool usecas;\n bool nosixpack;\n bool noevict;\n bool allowshrink;\n bool usethreadbatch;\n int nshards;\n double loadfactor;\n double shrinkfactor;\n uint64_t seed;\n};\n\n// The entry structure is a simple allocation with all the fields, being \n// variable in size, slammed together contiguously. There's a one byte header\n// that provides information about what is available in the structure.\n// The format is: (header,time,expires?,flags?,cas?,key,value)\n// The expires, flags, and cas fields are optional. 
The optionality depends on\n// header bit flags.\nstruct entry;\n\n// Returns the sizeof the entry struct, which takes up no space at all.\n// This would be like doing a sizeof(struct entry), if entry had a structure.\nstatic size_t entry_struct_size(void) {\n return 0;\n}\n\n// Returns the data portion of the entry, which is the entire allocation.\nstatic const uint8_t *entry_data(const struct entry *entry) {\n return (uint8_t*)entry;\n}\n\nstatic int64_t entry_expires(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n int64_t x = 0;\n if ((hdr>>0)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\nstatic int64_t entry_time(struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n etime_t etime;\n memcpy(&etime, p+1, sizeof(etime_t));\n#ifdef HALFSECONDTIME\n int64_t time = (int64_t)etime * INT64_C(500000000);\n#else \n int64_t time = etime;\n#endif \n return time;\n}\n\nstatic void entry_settime(struct entry *entry, int64_t time) {\n const uint8_t *p = entry_data(entry);\n#ifdef HALFSECONDTIME\n // Eviction time is stored as half seconds.\n etime_t etime = time / INT64_C(500000000);\n etime = etime > UINT32_MAX ? UINT32_MAX : etime;\n#else\n etime_t etime = time;\n#endif\n memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));\n}\n\nstatic int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,\n int64_t cleartime)\n{\n return etime < cleartime ? POGOCACHE_REASON_CLEARED :\n expires > 0 && expires <= now ? 
POGOCACHE_REASON_EXPIRED :\n 0;\n}\n\nstatic int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {\n int64_t etime = entry_time(entry);\n int64_t expires = entry_expires(entry);\n return entry_alive_exp(expires, etime, now, cleartime);\n}\n\nstatic uint64_t entry_cas(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n uint64_t x = 0;\n if ((hdr>>2)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\n// returns the key. If using sixpack make sure to copy the result asap.\nstatic const char *entry_key(const struct entry *entry, size_t *keylen_out,\n char buf[128])\n{\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n if ((hdr>>3)&1) {\n keylen = unsixpack(key, (int)keylen, buf);\n key = buf;\n }\n *keylen_out = keylen;\n return key;\n}\n\n// returns the raw key. 
sixpack will be returned in it's raw format\nstatic const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n *keylen_out = keylen;\n return key;\n}\n\nstatic bool entry_sixpacked(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p);\n return (hdr>>3)&1;\n}\n\nstatic size_t entry_extract(const struct entry *entry, const char **key,\n size_t *keylen, char buf[128], const char **val, size_t *vallen, \n int64_t *expires, uint32_t *flags, uint64_t *cas,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n if (expires) {\n memcpy(expires, p, 8);\n }\n p += 8; // expires\n } else {\n if (expires) {\n *expires = 0;\n }\n }\n if ((hdr>>1)&1) {\n if (flags) {\n memcpy(flags, p, 4);\n }\n p += 4; // flags\n } else {\n if (flags) {\n *flags = 0;\n }\n }\n if (ctx->usecas) {\n if (cas) {\n memcpy(cas, p, 8);\n }\n p += 8; // cas\n } else {\n if (cas) {\n *cas = 0;\n }\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n if (key) {\n *key = (char*)p;\n *keylen = x;\n if ((hdr>>3)&1) {\n *keylen = unsixpack(*key, (int)*keylen, buf);\n *key = buf;\n }\n }\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n if (val) {\n *val = (char*)p;\n *vallen = x;\n }\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\nstatic size_t entry_memsize(const struct entry *entry,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p 
+= 4; // flags\n }\n if (ctx->usecas) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\n// The 'cas' param should always be set to zero unless loading from disk. \n// Setting to zero will set a new unique cas to the entry.\nstatic struct entry *entry_new(const char *key, size_t keylen, const char *val,\n size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,\n struct pgctx *ctx)\n{\n bool usesixpack = !ctx->nosixpack;\n#ifdef DBGCHECKENTRY\n // printf(\"entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, \"\n // \"expires=%\" PRId64 \", flags=%\" PRId32 \", cas=%\" PRIu64 \", \"\n // \"usesixpack=%d\\n\", (int)keylen, key, keylen, (int)vallen, key, vallen,\n // expires, flags, cas, usesixpack);\n int64_t oexpires = expires;\n uint32_t oflags = flags;\n uint64_t ocas = cas;\n const char *okey = key;\n size_t okeylen = keylen;\n const char *oval = val;\n size_t ovallen = vallen;\n#endif\n uint8_t hdr = 0;\n uint8_t keylenbuf[10];\n uint8_t vallenbuf[10];\n int nexplen, nflagslen, ncaslen, nkeylen, nvallen;\n if (expires > 0) {\n hdr |= 1;\n nexplen = 8;\n } else {\n nexplen = 0;\n }\n if (flags > 0) {\n hdr |= 2;\n nflagslen = 4;\n } else {\n nflagslen = 0;\n }\n if (ctx->usecas) {\n hdr |= 4;\n ncaslen = 8;\n } else {\n ncaslen = 0;\n }\n char buf[128];\n if (usesixpack && keylen <= 128) {\n size_t len = sixpack(key, keylen, buf);\n if (len > 0) {\n hdr |= 8;\n keylen = len;\n key = buf;\n }\n }\n nkeylen = varint_write_u64(keylenbuf, keylen);\n nvallen = varint_write_u64(vallenbuf, vallen);\n struct entry *entry_out = 0;\n size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+\n ncaslen+nkeylen+keylen+nvallen+vallen;\n // printf(\"malloc=%p size=%zu, ctx=%p\\n\", ctx->malloc, size, ctx);\n void *mem = ctx->malloc(size);\n struct entry *entry = mem;\n if (!entry) {\n return 
0;\n }\n uint8_t *p = (void*)entry_data(entry);\n *(p++) = hdr;\n memset(p, 0, sizeof(etime_t));\n p += sizeof(etime_t); // time\n if (nexplen > 0) {\n memcpy(p, &expires, nexplen);\n p += nexplen;\n }\n if (nflagslen > 0) {\n memcpy(p, &flags, nflagslen);\n p += nflagslen;\n }\n if (ncaslen > 0) {\n memcpy(p, &cas, ncaslen);\n p += ncaslen;\n }\n memcpy(p, keylenbuf, nkeylen);\n p += nkeylen;\n memcpy(p, key, keylen);\n p += keylen;\n memcpy(p, vallenbuf, nvallen);\n p += nvallen;\n memcpy(p, val, vallen);\n p += vallen;\n entry_out = entry;\n#ifdef DBGCHECKENTRY\n // check the key\n const char *key2, *val2;\n size_t keylen2, vallen2;\n int64_t expires2;\n uint32_t flags2;\n uint64_t cas2;\n char buf1[256];\n entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,\n &flags2, &cas2, ctx);\n assert(expires2 == oexpires);\n assert(flags2 == oflags);\n assert(cas2 == ocas);\n assert(keylen2 == okeylen);\n assert(memcmp(key2, okey, okeylen) == 0);\n assert(vallen2 == ovallen);\n assert(memcmp(val2, oval, ovallen) == 0);\n#endif\n return entry_out;\n}\n\nstatic void entry_free(struct entry *entry, struct pgctx *ctx) {\n ctx->free(entry);\n}\n\nstatic int entry_compare(const struct entry *a, const struct entry *b) {\n size_t akeylen, bkeylen;\n char buf1[256], buf2[256];\n const char *akey;\n const char *bkey;\n if (entry_sixpacked(a) == entry_sixpacked(b)) {\n akey = entry_rawkey(a, &akeylen);\n bkey = entry_rawkey(b, &bkeylen);\n } else {\n akey = entry_key(a, &akeylen, buf1);\n bkey = entry_key(b, &bkeylen, buf2);\n }\n size_t size = akeylen < bkeylen ? akeylen : bkeylen;\n int cmp = memcmp(akey, bkey, size);\n if (cmp == 0) {\n cmp = akeylen < bkeylen ? 
-1 : akeylen > bkeylen;\n }\n return cmp;\n}\n\n#ifndef HASHSIZE\n#define HASHSIZE 3\n#endif\n#if HASHSIZE < 1 || HASHSIZE > 4\n#error bad hash size\n#endif\n\nstruct bucket {\n uint8_t entry[PTRSIZE]; // 48-bit pointer\n uint8_t hash[HASHSIZE]; // 24-bit hash\n uint8_t dib; // distance to bucket\n};\n\nstatic_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, \"bad bucket size\");\n\nstruct map {\n int cap; // initial capacity\n int nbuckets; // number of buckets\n int count; // current entry count\n int mask; // bit mask for \n int growat;\n int shrinkat;\n struct bucket *buckets;\n uint64_t total; // current entry count\n size_t entsize; // memory size of all entries\n \n};\n\nstruct shard {\n atomic_uintptr_t lock; // spinlock (batch pointer)\n uint64_t cas; // compare and store value\n int64_t cleartime; // last clear time\n int clearcount; // number of items cleared\n struct map map; // robinhood hashmap\n // for batch linked list only\n struct shard *next;\n};\n\nstatic void lock_init(struct shard *shard) {\n atomic_init(&shard->lock, 0);\n}\n\nstruct batch {\n struct pogocache *cache; // associated cache.\n struct shard *shard; // first locked shard\n int64_t time; // timestamp\n};\n\nstruct pogocache {\n bool isbatch; \n union {\n struct pgctx ctx;\n struct batch batch;\n };\n struct shard shards[];\n};\n\nstatic struct entry *get_entry(struct bucket *bucket) {\n return load_ptr(bucket->entry);\n}\n\nstatic void set_entry(struct bucket *bucket, struct entry *entry) {\n store_ptr(bucket->entry, entry);\n}\n\n#if HASHSIZE == 1\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFF;\n}\nstatic void write_hash(uint8_t data[1], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[1]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n return hash;\n}\n#elif HASHSIZE == 2\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFF;\n}\nstatic void write_hash(uint8_t data[2], uint32_t hash) {\n data[0] = 
(hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[2]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n return hash;\n}\n#elif HASHSIZE == 3\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFFFF;\n}\nstatic void write_hash(uint8_t data[3], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[3]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n return hash;\n}\n#else \nstatic uint32_t clip_hash(uint32_t hash) {\n return hash;\n}\nstatic void write_hash(uint8_t data[4], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n data[3] = (hash>>24)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[4]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n hash |= ((uint64_t)data[3])<<24;\n return hash;\n}\n#endif\n\nstatic uint32_t get_hash(struct bucket *bucket) {\n return read_hash(bucket->hash);\n}\n\nstatic void set_hash(struct bucket *bucket, uint32_t hash) {\n write_hash(bucket->hash, hash);\n}\n\nstatic uint8_t get_dib(struct bucket *bucket) {\n return bucket->dib;\n}\n\nstatic void set_dib(struct bucket *bucket, uint8_t dib) {\n bucket->dib = dib;\n}\n\nstatic bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {\n map->cap = cap;\n map->nbuckets = cap;\n map->count = 0;\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * ctx->loadfactor;\n map->shrinkat = map->nbuckets * ctx->shrinkfactor;\n size_t size = sizeof(struct bucket)*map->nbuckets;\n map->buckets = ctx->malloc(size);\n if (!map->buckets) {\n // nomem\n memset(map, 0, sizeof(struct map));\n return false;\n }\n memset(map->buckets, 0, size);\n return true;\n}\n\nstatic bool resize(struct map *map, 
size_t new_cap, struct pgctx *ctx) {\n struct map map2;\n if (!map_init(&map2, new_cap, ctx)) {\n return false;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket ebkt = map->buckets[i];\n if (get_dib(&ebkt)) {\n set_dib(&ebkt, 1);\n size_t j = get_hash(&ebkt) & map2.mask;\n while (1) {\n if (get_dib(&map2.buckets[j]) == 0) {\n map2.buckets[j] = ebkt;\n break;\n }\n if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {\n struct bucket tmp = map2.buckets[j];\n map2.buckets[j] = ebkt;\n ebkt = tmp;\n }\n j = (j + 1) & map2.mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n }\n }\n int org_cap = map->cap;\n int org_count = map->count;\n ctx->free(map->buckets);\n memcpy(map, &map2, sizeof(struct map));\n map->cap = org_cap;\n map->count = org_count;\n return true;\n}\n\nstatic bool map_insert(struct map *map, struct entry *entry, uint32_t hash,\n struct entry **old, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*2, ctx)) {\n *old = 0;\n return false;\n }\n }\n map->entsize += entry_memsize(entry, ctx);\n struct bucket ebkt;\n set_entry(&ebkt, entry);\n set_hash(&ebkt, hash);\n set_dib(&ebkt, 1);\n size_t i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n // new entry\n map->buckets[i] = ebkt;\n map->count++;\n map->total++;\n *old = 0;\n return true;\n }\n if (get_hash(&ebkt) == get_hash(&map->buckets[i]) && \n entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)\n {\n // replaced\n *old = get_entry(&map->buckets[i]);\n map->entsize -= entry_memsize(*old, ctx);\n set_entry(&map->buckets[i], get_entry(&ebkt));\n return true;\n }\n if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {\n struct bucket tmp = map->buckets[i];\n map->buckets[i] = ebkt;\n ebkt = tmp;\n }\n i = (i + 1) & map->mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n}\n\nstatic bool bucket_eq(struct map *map, size_t i, const char *key,\n size_t keylen, uint32_t hash)\n{\n if 
(get_hash(&map->buckets[i]) != hash) {\n return false;\n }\n size_t keylen2;\n char buf[128];\n const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);\n return keylen == keylen2 && memcmp(key, key2, keylen) == 0;\n}\n\n// Returns the bucket index for key, or -1 if not found.\nstatic int map_get_bucket(struct map *map, const char *key, size_t keylen,\n uint32_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while (1) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n return -1;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return i;\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic struct entry *map_get_entry(struct map *map, const char *key,\n size_t keylen, uint32_t hash, int *bkt_idx_out)\n{\n int i = map_get_bucket(map, key, keylen, hash);\n *bkt_idx_out = i;\n return i >= 0 ? get_entry(&map->buckets[i]) : 0;\n}\n\n// This deletes entry from bucket and adjusts the dibs buckets to right, if\n// needed.\nstatic void delbkt(struct map *map, size_t i) {\n set_dib(&map->buckets[i], 0);\n while (1) {\n size_t h = i;\n i = (i + 1) & map->mask;\n if (get_dib(&map->buckets[i]) <= 1) {\n set_dib(&map->buckets[h], 0);\n break;\n }\n map->buckets[h] = map->buckets[i];\n set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);\n }\n map->count--;\n}\n\nstatic bool needsshrink(struct map *map, struct pgctx *ctx) {\n return ctx->allowshrink && map->nbuckets > map->cap && \n map->count <= map->shrinkat;\n}\n\n// Try to shrink the hashmap. 
If needed, this will allocate a new hashmap that\n// has fewer buckets and move all existing entries into the smaller map.\n// The 'multi' param is a hint that multi entries may have been deleted, such\n// as with the iter or clear operations.\n// If the resize fails due to an allocation error then the existing hashmap\n// will be retained.\nstatic void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {\n if (!needsshrink(map, ctx)) {\n return;\n }\n int cap;\n if (multi) {\n // Determine how many buckets are needed to store all entries.\n cap = map->cap;\n int growat = cap * ctx->loadfactor;\n while (map->count >= growat) {\n cap *= 2;\n growat = cap * ctx->loadfactor;\n }\n } else {\n // Just half the buckets\n cap = map->nbuckets / 2;\n }\n resize(map, cap, ctx);\n}\n\n// delete an entry at bucket position. not called directly\nstatic struct entry *delentry_at_bkt(struct map *map, size_t i, \n struct pgctx *ctx)\n{\n struct entry *old = get_entry(&map->buckets[i]);\n assert(old);\n map->entsize -= entry_memsize(old, ctx);\n delbkt(map, i);\n return old;\n}\n\nstatic struct entry *map_delete(struct map *map, const char *key,\n size_t keylen, uint32_t hash, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n int i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n return 0;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return delentry_at_bkt(map, i, ctx);\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic size_t evict_entry(struct shard *shard, int shardidx, \n struct entry *entry, int64_t now, int reason, struct pgctx *ctx)\n{\n char buf[128];\n size_t keylen;\n const char *key = entry_key(entry, &keylen, buf);\n uint32_t hash = th64(key, keylen, ctx->seed);\n struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(del == entry); (void)del;\n if (ctx->evicted) {\n // Notify user that an entry was evicted.\n const char *val;\n size_t vallen;\n int64_t expires = 0;\n uint32_t flags = 0;\n uint64_t cas = 0;\n 
entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val,\n vallen, expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n size_t size = entry_memsize(entry, ctx);\n entry_free(entry, ctx);\n return size;\n}\n\n// evict an entry using the 2-random algorithm.\n// Pick two random entries and delete the one with the oldest access time.\n// Do not evict the entry if it matches the provided hash.\nstatic void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,\n int64_t now, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n struct map *map = &shard->map;\n struct entry *entries[2];\n int count = 0;\n for (int i = 1; i < map->nbuckets && count < 2; i++) {\n size_t j = (i+hash)&(map->nbuckets-1);\n struct bucket *bkt = &map->buckets[j];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry has expired. 
Evict this one instead.\n evict_entry(shard, shardidx, entry, now, reason, ctx);\n return;\n }\n if (get_hash(bkt) == hash) {\n continue;\n }\n entries[count++] = entry;\n }\n int choose;\n if (count == 1) {\n choose = 0;\n } else if (count == 2) {\n // We now have two candidates.\n if (entry_time(entries[0]) < entry_time(entries[1])) {\n choose = 0;\n } else {\n choose = 1;\n }\n } else {\n return;\n }\n evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,\n ctx);\n}\n\nstatic void shard_deinit(struct shard *shard, struct pgctx *ctx) {\n struct map *map = &shard->map;\n if (!map->buckets) {\n return;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n entry_free(entry, ctx);\n }\n ctx->free(map->buckets);\n}\n\nstatic bool shard_init(struct shard *shard, struct pgctx *ctx) {\n memset(shard, 0, sizeof(struct shard));\n lock_init(shard);\n shard->cas = 1;\n if (!map_init(&shard->map, INITCAP, ctx)) {\n // nomem\n shard_deinit(shard, ctx);\n return false;\n }\n return true;\n}\n\n/// Free all cache and shard hashmap allocations.\n/// This does not access the value data in any of the entries. If it is needed\n/// for the further cleanup at an entry value level, then use the\n/// pogocache_iter to perform the cleanup on each entry before calling this\n/// operation.\n/// Also this is not threadsafe. 
Make sure that other threads are not\n/// currently using the cache concurrently nor after this function is called.\nvoid pogocache_free(struct pogocache *cache) {\n if (!cache) {\n return;\n }\n struct pgctx *ctx = &cache->ctx;\n for (int i = 0; i < cache->ctx.nshards; i++) {\n shard_deinit(&cache->shards[i], ctx);\n }\n cache->ctx.free(cache);\n}\n\nstatic void opts_to_ctx(int nshards, struct pogocache_opts *opts,\n struct pgctx *ctx)\n{\n ctx->nshards = nshards;\n int loadfactor = 0;\n if (opts) {\n ctx->yield = opts->yield;\n ctx->evicted = opts->evicted;\n ctx->udata = opts->udata;\n ctx->usecas = opts->usecas;\n ctx->nosixpack = opts->nosixpack;\n ctx->noevict = opts->noevict;\n ctx->seed = opts->seed;\n loadfactor = opts->loadfactor;\n ctx->allowshrink = opts->allowshrink;\n ctx->usethreadbatch = opts->usethreadbatch;\n }\n // make loadfactor a floating point\n loadfactor = loadfactor == 0 ? DEFLOADFACTOR :\n loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :\n loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :\n loadfactor;\n ctx->loadfactor = ((double)loadfactor/100.0);\n ctx->shrinkfactor = ((double)SHRINKAT/100.0);\n}\n\nstatic struct pogocache_opts newdefopts = { 0 };\n\n/// Returns a new cache or null if there is not enough memory available.\n/// See 'pogocache_opts' for all options.\nstruct pogocache *pogocache_new(struct pogocache_opts *opts) {\n if (!opts) {\n opts = &newdefopts;\n }\n void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;\n void (*_free)(void*) = opts->free ? opts->free : free;\n int shards = !opts || opts->nshards <= 0 ? 
DEFSHARDS : opts->nshards;\n size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);\n struct pogocache *cache = _malloc(size);\n if (!cache) {\n return 0;\n }\n memset(cache, 0, sizeof(struct pogocache));\n struct pgctx *ctx = &cache->ctx;\n opts_to_ctx(shards, opts, ctx);\n ctx->malloc = _malloc;\n ctx->free = _free;\n for (int i = 0; i < ctx->nshards; i++) {\n if (!shard_init(&cache->shards[i], ctx)) {\n // nomem\n pogocache_free(cache);\n return 0;\n }\n }\n return cache;\n}\n\nstatic int shard_index(struct pogocache *cache, uint64_t hash) {\n return (hash>>32)%cache->ctx.nshards;\n}\n\nstatic struct shard *shard_get(struct pogocache *cache, int index) {\n return &cache->shards[index];\n}\n\n/// Returns a timestamp.\nint64_t pogocache_now(void) {\n return getnow();\n}\n\nstatic __thread struct pogocache thbatch;\n\nstruct pogocache *pogocache_begin(struct pogocache *cache) {\n struct pogocache *batch;\n if (cache->ctx.usethreadbatch) {\n batch = &thbatch;\n } else {\n batch = cache->ctx.malloc(sizeof(struct pogocache));\n if (!batch) {\n return 0;\n }\n }\n batch->isbatch = true;\n batch->batch.cache = cache;\n batch->batch.shard = 0;\n batch->batch.time = 0;\n return batch;\n}\n\nvoid pogocache_end(struct pogocache *batch) {\n assert(batch->isbatch);\n struct shard *shard = batch->batch.shard;\n while (shard) {\n struct shard *next = shard->next;\n shard->next = 0;\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);\n shard = next;\n }\n if (!batch->batch.cache->ctx.usethreadbatch) {\n batch->batch.cache->ctx.free(batch);\n }\n}\n\nstatic void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {\n if (batch) {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n shard->next = batch->shard;\n batch->shard = shard;\n break;\n }\n if (val == (uintptr_t)(void*)batch) {\n break;\n }\n if (ctx->yield) {\n 
ctx->yield(ctx->udata);\n }\n }\n } else {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n break;\n }\n if (ctx->yield) {\n ctx->yield(ctx->udata);\n }\n }\n }\n}\n\nstatic bool acquire_for_scan(int shardidx, struct shard **shard_out, \n struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *shard_out = shard;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// acquire a lock for the key\nstatic bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,\n struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n uint64_t fhash = th64(key, keylen, cache->ctx.seed);\n int shardidx = shard_index(cache, fhash);\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *hash_out = fhash;\n *shard_out = shard;\n *shardidx_out = shardidx;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// Acquire a lock on the shard for key and execute the provided operation.\n#define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \\\n int shardidx; \\\n uint32_t hash; \\\n struct shard *shard; \\\n bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \\\n &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)shardidx, (void)hash, (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\n// Acquire a lock on the shard at index and execute the 
provided operation.\n#define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \\\n struct shard *shard; \\\n bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\nstatic int loadop(const void *key, size_t keylen, \n struct pogocache_load_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defloadopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n // Get the entry bucket index for the entry with key.\n int bidx = map_get_bucket(&shard->map, key, keylen, hash);\n if (bidx == -1) {\n return POGOCACHE_NOTFOUND;\n }\n // Extract the bucket, entry, and values.\n struct bucket *bkt = &shard->map.buckets[bidx];\n struct entry *entry = get_entry(bkt);\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. 
Evict the entry and clear the bucket.\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(entry, ctx);\n delbkt(&shard->map, bidx);\n return POGOCACHE_NOTFOUND;\n }\n if (!opts->notouch) {\n entry_settime(entry, now);\n }\n if (opts->entry) {\n struct pogocache_update *update = 0;\n opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,\n cas, &update, opts->udata);\n if (update) {\n // User wants to update the entry.\n shard->cas++;\n struct entry *entry2 = entry_new(key, keylen, update->value,\n update->valuelen, update->expires, update->flags, shard->cas, \n ctx);\n if (!entry2) {\n return POGOCACHE_NOMEM;\n }\n entry_settime(entry2, now);\n set_entry(bkt, entry2);\n entry_free(entry, ctx);\n }\n }\n return POGOCACHE_FOUND;\n}\n\n/// Loads an entry from the cache.\n/// Use the pogocache_load_opts.entry callback to access the value of the entry.\n/// It's possible to update the value using the 'update' param in the callback.\n/// See 'pogocache_load_opts' for all options.\n/// @returns POGOCACHE_FOUND when the entry was found.\n/// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\nint pogocache_load(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_load_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen, \n loadop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int deleteop(const void *key, size_t keylen, \n struct pogocache_delete_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defdeleteopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);\n if (!entry) {\n // Entry does not exist\n return POGOCACHE_NOTFOUND;\n }\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. It was already deleted from the map but\n // we still need to notify the user.\n if (ctx->evicted) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (opts->entry) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen,\n expires, flags, cas, opts->udata))\n {\n // User canceled the delete. 
Put it back into the map.\n // This insert will not cause an allocation error because the \n // previous delete operation left us with at least one available\n // bucket.\n struct entry *old;\n bool ok = map_insert(&shard->map, entry, hash, &old, ctx);\n assert(ok); (void)ok;\n assert(!old);\n return POGOCACHE_CANCELED;\n }\n }\n // Entry was successfully deleted.\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_DELETED;\n}\n\n/// Deletes an entry from the cache.\n/// See 'pogocache_delete_opts' for all options.\n/// @returns POGOCACHE_DELETED when the entry was successfully deleted.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\n/// @returns POGOCACHE_CANCELED when opts.entry callback returned false.\nint pogocache_delete(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_delete_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n deleteop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int storeop(const void *key, size_t keylen, const void *val,\n size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,\n int shardidx, uint32_t hash, struct pgctx *ctx)\n{\n int count = shard->map.count;\n opts = opts ? opts : &defstoreopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int64_t expires = 0;\n if (opts->expires > 0) {\n expires = opts->expires;\n } else if (opts->ttl > 0) {\n expires = int64_add_clamp(now, opts->ttl);\n }\n if (opts->keepttl) {\n // User wants to keep the existing ttl. 
Get the existing entry from the\n // map first and take its expiration.\n int i;\n struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason == 0) {\n expires = entry_expires(old);\n }\n }\n }\n shard->cas++;\n struct entry *entry = entry_new(key, keylen, val, vallen, expires,\n opts->flags, shard->cas, ctx);\n if (!entry) {\n goto nomem;\n }\n entry_settime(entry, now);\n if (opts->lowmem && ctx->noevict) {\n goto nomem;\n }\n // Insert new entry into map\n struct entry *old;\n if (!map_insert(&shard->map, entry, hash, &old, ctx)) {\n goto nomem;\n }\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason) {\n // There's an old entry, but it's no longer alive.\n // Treat this like an eviction and notify the user.\n if (ctx->evicted) {\n const char *oval;\n size_t ovallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0,\n &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,\n oexpires, oflags, ocas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(old, ctx);\n old = 0;\n }\n }\n int put_back_status = 0;\n if (old) {\n if (opts->casop) {\n // User is requesting the cas operation.\n if (ctx->usecas) {\n uint64_t old_cas = entry_cas(old);\n if (opts->cas != old_cas) {\n // CAS test failed.\n // printf(\". cas failed: expected %\" PRIu64 \", \"\n // \"got %\" PRIu64 \"\\n\", cas, old_cas);\n put_back_status = POGOCACHE_FOUND;\n }\n } else {\n put_back_status = POGOCACHE_FOUND;\n }\n } else if (opts->nx) {\n put_back_status = POGOCACHE_FOUND;\n }\n if (put_back_status) {\n put_back:;\n // The entry needs be put back into the map and operation must\n // return early.\n // This insert operation must not fail since the entry 'e' and\n // 'old' both exist and will always be bucket swapped. 
There will\n // never be a new allocation.\n struct entry *e = 0;\n bool ok = map_insert(&shard->map, old, hash, &e, ctx);\n assert(ok); (void)ok;\n assert(e == entry);\n entry_free(entry, ctx);\n return put_back_status;\n }\n } else if (opts->xx || opts->casop) {\n // The new entry must not be inserted.\n // Delete it and return early.\n struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(e == entry); (void)e;\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (old && opts->entry) {\n // User is requesting to verify the old entry before allowing it to be\n // replaced by the new entry.\n const char *val;\n size_t vallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,\n oflags, ocas, opts->udata))\n {\n // User wants to keep the old entry.\n put_back_status = POGOCACHE_CANCELED;\n goto put_back;\n }\n }\n // The new entry was inserted.\n if (old) {\n entry_free(old, ctx);\n return POGOCACHE_REPLACED;\n } else {\n if (opts->lowmem && shard->map.count > count) {\n // The map grew by one bucket, yet the user indicates that there is\n // a low memory event. Evict one entry.\n auto_evict_entry(shard, shardidx, hash, now, ctx);\n }\n return POGOCACHE_INSERTED;\n }\nnomem:\n entry_free(entry, ctx);\n return POGOCACHE_NOMEM;\n}\n\n/// Insert or replace an entry in the cache.\n/// If an entry with the same key already exists then the cache then the \n/// the opts.entry callback can be used to check the existing\n/// value first, allowing the operation to be canceled.\n/// See 'pogocache_store_opts' for all options.\n/// @returns POGOCACHE_INSERTED when the entry was inserted.\n/// @returns POGOCACHE_REPLACED when the entry replaced an existing one.\n/// @returns POGOCACHE_FOUND when the entry already exists. 
(cas/nx)\n/// @returns POGOCACHE_CANCELED when the operation was canceled.\n/// @returns POGOCACHE_NOMEM when there is system memory available.\nint pogocache_store(struct pogocache *cache, const void *key, size_t keylen, \n const void *val, size_t vallen, struct pogocache_store_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)\n );\n}\n\n\nstatic struct pogocache *rootcache(struct pogocache *cache) {\n return cache->isbatch ? cache->batch.cache : cache;\n}\n\n/// Returns the number of shards in cache\nint pogocache_nshards(struct pogocache *cache) {\n cache = rootcache(cache);\n return cache->ctx.nshards;\n}\n\nstatic int iterop(struct shard *shard, int shardidx, int64_t now,\n struct pogocache_iter_opts *opts, struct pgctx *ctx)\n{\n char buf[128];\n int status = POGOCACHE_FINISHED;\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen,\n &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n#ifdef EVICTONITER\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n // Delete entry at bucket.\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n i--;\n#endif\n } else {\n // Entry is alive, check with user for next action.\n int action = POGOCACHE_ITER_CONTINUE;\n if (opts->entry) {\n action = opts->entry(shardidx, now, key, keylen, val,\n vallen, expires, flags, cas, opts->udata);\n }\n if (action != POGOCACHE_ITER_CONTINUE) {\n if (action&POGOCACHE_ITER_DELETE) {\n // Delete entry at bucket\n delbkt(&shard->map, 
i);\n entry_free(entry, ctx);\n i--;\n }\n if (action&POGOCACHE_ITER_STOP) {\n status = POGOCACHE_CANCELED;\n break;\n }\n }\n }\n }\n tryshrink(&shard->map, true, ctx);\n return status;\n}\n\n/// Iterate over entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The pogocache_iter_opts.entry callback can be used to perform actions such\n/// as: deleting entries and stopping iteration early. \n/// See 'pogocache_iter_opts' for all options.\n/// @return POGOCACHE_FINISHED if iteration completed\n/// @return POGOCACHE_CANCELED if iteration stopped early\nint pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defiteropts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return POGOCACHE_FINISHED;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)\n );\n }\n for (int i = 0; i < nshards; i++) {\n int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n iterop(shard, i, now, opts, &cache->ctx)\n );\n if (status != POGOCACHE_FINISHED) {\n return status;\n }\n }\n return POGOCACHE_FINISHED;\n}\n\nstatic size_t countop(struct shard *shard) {\n return shard->map.count - shard->clearcount;\n}\n\n/// Returns the number of entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_count(struct pogocache *cache,\n struct pogocache_count_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defcountopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n countop(shard);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n countop(shard);\n );\n }\n return count;\n}\n\nstatic uint64_t totalop(struct shard *shard) {\n return shard->map.total;\n}\n\n/// Returns the total number of entries that have ever been stored in the cache.\n/// For the current number of entries use pogocache_count().\n/// There's an option to allow for isolating the operation to a single shard.\nuint64_t pogocache_total(struct pogocache *cache,\n struct pogocache_total_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &deftotalopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,\n totalop(shard);\n );\n }\n uint64_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,\n totalop(shard);\n );\n }\n return count;\n}\n\nstatic size_t sizeop(struct shard *shard, bool entriesonly) {\n size_t size = 0;\n if (!entriesonly) {\n size += sizeof(struct shard);\n size += sizeof(struct bucket)*shard->map.nbuckets;\n }\n size += shard->map.entsize;\n return size;\n}\n\n/// Returns the total memory size of the shard.\n/// This includes the memory size of all data structures and entries.\n/// Use the entriesonly option to limit the result to only the entries.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_size(struct pogocache *cache,\n struct pogocache_size_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defsizeopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n sizeop(shard, opts->entriesonly);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n sizeop(shard, opts->entriesonly);\n );\n }\n return count;\n}\n\n\n\nstatic int sweepop(struct shard *shard, int shardidx, int64_t now,\n size_t *swept, size_t *kept, struct pgctx *ctx)\n{\n char buf[128];\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int64_t expires = entry_expires(entry);\n int64_t etime = entry_time(entry);\n int reason = entry_alive_exp(expires, etime, now, shard->cleartime);\n if (reason == 0) {\n // entry is still alive\n (*kept)++;\n continue;\n }\n // entry is no longer alive.\n if (ctx->evicted) {\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,\n &flags, &cas, ctx);\n // Report eviction to user\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n (*swept)++;\n // Entry was deleted from bucket, which may move entries to the right\n // over one bucket to the left. 
So we need to check the same bucket\n // again.\n i--;\n }\n tryshrink(&shard->map, true, ctx);\n return 0;\n}\n\n/// Remove expired entries from the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The final 'kept' or 'swept' counts are returned.\n/// @return POGOCACHE_FINISHED when iteration completed\n/// @return POGOCACHE_CANCELED when iteration stopped early\nvoid pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept, \n struct pogocache_sweep_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweepopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n size_t sweptc = 0;\n size_t keptc = 0;\n if (opts->oneshard) {\n if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,\n &cache->ctx);\n );\n }\n } else {\n for (int i = 0; i < nshards; i++) {\n size_t sweptc2 = 0;\n size_t keptc2 = 0;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);\n );\n sweptc += sweptc2;\n keptc += keptc2;\n }\n }\n if (swept) {\n *swept = sweptc;\n }\n if (kept) {\n *kept = keptc;\n }\n}\n\nstatic int clearop(struct shard *shard, int shardidx, int64_t now, \n struct pgctx *ctx)\n{\n (void)shardidx, (void)ctx;\n shard->cleartime = now;\n shard->clearcount += (shard->map.count-shard->clearcount);\n return 0;\n}\n\n/// Clear the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nvoid pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defclearopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return;\n }\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n clearop(shard, opts->oneshardidx, now, &cache->ctx);\n );\n return;\n }\n for (int i = 0; i < cache->ctx.nshards; i++) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n clearop(shard, i, now, &cache->ctx);\n );\n }\n}\n\nstatic int sweeppollop(struct shard *shard, int shardidx, int64_t now, \n int pollsize, double *percent)\n{\n // start at random bucket\n int count = 0;\n int dead = 0;\n int bidx = mix13(now+shardidx)%shard->map.nbuckets;\n for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {\n struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n count++;\n dead += (entry_alive(entry, now, shard->cleartime) != 0);\n }\n if (count == 0) {\n *percent = 0;\n return 0;\n }\n *percent = (double)dead/(double)count;\n return 0;\n}\n\ndouble pogocache_sweep_poll(struct pogocache *cache, \n struct pogocache_sweep_poll_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweeppollopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int pollsize = opts->pollsize == 0 ? 20 : opts->pollsize;\n \n // choose a random shard\n int shardidx = mix13(now)%nshards;\n double percent;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,\n sweeppollop(shard, shardidx, now, pollsize, &percent);\n );\n return percent;\n}\n"], ["/pogocache/src/postgres.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit postgres.c provides the parser for the Postgres wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n\n// #define PGDEBUG\n\n#define TEXTOID 25\n#define BYTEAOID 17\n\nextern const char *version;\nextern const char *auth;\n\n#ifdef PGDEBUG\n#define dprintf printf\n#else\n#define dprintf(...)\n#endif\n\nstatic void print_packet(const char *data, size_t len) {\n dprintf(\". PACKET=%03zu [ \", len);\n for (size_t i = 0; i < len; i++) {\n printf(\"%02X \", (unsigned char)data[i]);\n }\n dprintf(\"]\\n\");\n dprintf(\". [\");\n for (size_t i = 0; i < len; i++) {\n unsigned char ch = data[i];\n if (ch < ' ') {\n ch = '?';\n }\n dprintf(\"%c\", ch);\n }\n dprintf(\"]\\n\");\n}\n\nstatic int32_t read_i32(const char *data) {\n return ((uint32_t)(uint8_t)data[0] << 24) |\n ((uint32_t)(uint8_t)data[1] << 16) |\n ((uint32_t)(uint8_t)data[2] << 8) |\n ((uint32_t)(uint8_t)data[3] << 0);\n}\n\nstatic void write_i32(char *data, int32_t x) {\n data[0] = (uint8_t)(((uint32_t)x) >> 24) & 0xFF;\n data[1] = (uint8_t)(((uint32_t)x) >> 16) & 0xFF;\n data[2] = (uint8_t)(((uint32_t)x) >> 8) & 0xFF;\n data[3] = (uint8_t)(((uint32_t)x) >> 0) & 0xFF;\n}\n\nstatic int16_t read_i16(const char *data) {\n return ((uint16_t)(uint8_t)data[0] << 8) |\n ((uint16_t)(uint8_t)data[1] << 0);\n}\nstatic void write_i16(char *data, int16_t x) {\n data[0] = (uint8_t)(((uint16_t)x) >> 8) & 0xFF;\n data[1] = (uint8_t)(((uint16_t)x) >> 0) & 0xFF;\n}\n\n// parse_begin is called to begin parsing a client message.\n#define parse_begin() \\\n const char *p = data; 
\\\n const char *e = p+len; \\\n (void)args, (void)pg, (void)e;\n\n// parse_end is called when parsing client message is complete.\n// This will check that the position of the client stream matches the\n// expected lenght provided by the client. \n#define parse_end() \\\n if ((size_t)(p-data) != len) { \\\n return -1; \\\n }\n\n#define parse_cstr() ({ \\\n const char *cstr = 0; \\\n const char *s = p; \\\n while (p < e) { \\\n if (*p == '\\0') { \\\n cstr = s; \\\n p++; \\\n break; \\\n } \\\n p++; \\\n } \\\n if (!cstr) { \\\n return -1; \\\n } \\\n cstr; \\\n}) \n\n#define parse_int16() ({ \\\n if (e-p < 2) { \\\n return -1; \\\n } \\\n int16_t x = read_i16(p); \\\n p += 2; \\\n x; \\\n})\n\n#define parse_byte() ({ \\\n if (e-p < 1) { \\\n return -1; \\\n } \\\n uint8_t x = *p; \\\n p += 1; \\\n x; \\\n})\n\n#define parse_int32() ({ \\\n if (e-p < 4) { \\\n return -1; \\\n } \\\n int32_t x = read_i32(p); \\\n p += 4; \\\n x; \\\n})\n\n#define parse_bytes(n) ({ \\\n if (e-p < n) { \\\n return -1; \\\n } \\\n const void *s = p; \\\n p += (n); \\\n s; \\\n})\n\nstatic void arg_append_unescape_simplestr(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n char *str2 = xmalloc(slen+1);\n for (size_t i = 0; i < str2len; i++) {\n if (str[i] == '\\'' && str[i+1] == '\\'') {\n i++;\n }\n str2[str2len++] = str[i];\n }\n args_append(args, str2, str2len, false);\n xfree(str2);\n}\n\nstatic void pg_statement_free(struct pg_statement *statement) {\n args_free(&statement->args);\n buf_clear(&statement->argtypes);\n}\n\n\nstatic void pg_portal_free(struct pg_portal *portal) {\n args_free(&portal->params);\n}\n\nstatic void statments_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n pg_statement_free(&statement);\n }\n hashmap_free(map);\n}\n\nstatic void portals_free(struct hashmap *map) 
{\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n pg_portal_free(&portal);\n }\n hashmap_free(map);\n}\n\nstruct pg *pg_new(void) {\n struct pg *pg = xmalloc(sizeof(struct pg));\n memset(pg, 0, sizeof(struct pg));\n pg->oid = TEXTOID;\n return pg;\n}\n\nvoid pg_free(struct pg *pg) {\n if (!pg) {\n return;\n }\n xfree(pg->application_name);\n xfree(pg->database);\n xfree(pg->user);\n buf_clear(&pg->buf);\n statments_free(pg->statements);\n portals_free(pg->portals);\n args_free(&pg->targs);\n // args_free(&pg->xargs);\n xfree(pg->desc);\n xfree(pg);\n}\n\nstatic uint64_t pg_statement_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n return hashmap_murmur(statement.name, strlen(statement.name), seed0, seed1);\n}\n\nstatic uint64_t pg_portal_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n return hashmap_murmur(portal.name, strlen(portal.name), seed0, seed1);\n}\n\nstatic int pg_statement_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_statement stmta;\n memcpy(&stmta, a, sizeof(struct pg_statement));\n struct pg_statement stmtb;\n memcpy(&stmtb, b, sizeof(struct pg_statement));\n return strcmp(stmta.name, stmtb.name);\n}\n\nstatic int pg_portal_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_portal portala;\n memcpy(&portala, a, sizeof(struct pg_portal));\n struct pg_portal portalb;\n memcpy(&portalb, b, sizeof(struct pg_portal));\n return strcmp(portala.name, portalb.name);\n}\n\nstatic void portal_insert(struct pg *pg, struct pg_portal *portal) {\n (void)portal;\n if (!pg->portals) {\n pg->portals = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_portal), 0, 
0, 0, pg_portal_hash, \n pg_portal_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->portals, portal);\n if (ptr) {\n struct pg_portal old;\n memcpy(&old, ptr, sizeof(struct pg_portal));\n pg_portal_free(&old);\n }\n}\n\nstatic void statement_insert(struct pg *pg, struct pg_statement *stmt) {\n if (!pg->statements) {\n pg->statements = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_statement), 0, 0, 0, pg_statement_hash, \n pg_statement_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->statements, stmt);\n if (ptr) {\n struct pg_statement old;\n memcpy(&old, ptr, sizeof(struct pg_statement));\n pg_statement_free(&old);\n }\n}\n\nstatic bool statement_get(struct pg *pg, const char *name, \n struct pg_statement *stmt)\n{\n if (!pg->statements) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_statement key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->statements, &key);\n if (!ptr) {\n return false;\n }\n memcpy(stmt, ptr, sizeof(struct pg_statement));\n return true;\n}\n\nstatic bool portal_get(struct pg *pg, const char *name, \n struct pg_portal *portal)\n{\n if (!pg->portals) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_portal key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->portals, &key);\n if (!ptr) {\n return false;\n }\n memcpy(portal, ptr, sizeof(struct pg_portal));\n return true;\n}\n\nstatic const uint8_t hextoks[256] = { \n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,\n 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n};\n\nstatic uint32_t decode_hex(const uint8_t *str) {\n return (((int)hextoks[str[0]])<<12) | (((int)hextoks[str[1]])<<8) |\n 
(((int)hextoks[str[2]])<<4) | (((int)hextoks[str[3]])<<0);\n}\n\nstatic bool is_surrogate(uint32_t cp) {\n return cp > 55296 && cp < 57344;\n}\n\nstatic uint32_t decode_codepoint(uint32_t cp1, uint32_t cp2) {\n return cp1 > 55296 && cp1 < 56320 && cp2 > 56320 && cp2 < 57344 ?\n ((cp1 - 55296) << 10) | ((cp2 - 56320) + 65536) :\n 65533;\n}\n\nstatic inline int encode_codepoint(uint8_t dst[], uint32_t cp) {\n if (cp < 128) {\n dst[0] = cp;\n return 1;\n } else if (cp < 2048) {\n dst[0] = 192 | (cp >> 6);\n dst[1] = 128 | (cp & 63);\n return 2;\n } else if (cp > 1114111 || is_surrogate(cp)) {\n cp = 65533; // error codepoint\n }\n if (cp < 65536) {\n dst[0] = 224 | (cp >> 12);\n dst[1] = 128 | ((cp >> 6) & 63);\n dst[2] = 128 | (cp & 63);\n return 3;\n }\n dst[0] = 240 | (cp >> 18);\n dst[1] = 128 | ((cp >> 12) & 63);\n dst[2] = 128 | ((cp >> 6) & 63);\n dst[3] = 128 | (cp & 63);\n return 4;\n}\n\n// for_each_utf8 iterates over each UTF-8 bytes in jstr, unescaping along the\n// way. 'f' is a loop expression that will make available the 'ch' char which \n// is just a single byte in a UTF-8 series.\n// this is taken from https://github.com/tidwall/json.c\n#define for_each_utf8(jstr, len, f) { \\\n size_t nn = (len); \\\n int ch = 0; \\\n (void)ch; \\\n for (size_t ii = 0; ii < nn; ii++) { \\\n if ((jstr)[ii] != '\\\\') { \\\n ch = (jstr)[ii]; \\\n if (1) f \\\n continue; \\\n }; \\\n ii++; \\\n if (ii == nn) break; \\\n switch ((jstr)[ii]) { \\\n case '\\\\': ch = '\\\\'; break; \\\n case '/' : ch = '/'; break; \\\n case 'b' : ch = '\\b'; break; \\\n case 'f' : ch = '\\f'; break; \\\n case 'n' : ch = '\\n'; break; \\\n case 'r' : ch = '\\r'; break; \\\n case 't' : ch = '\\t'; break; \\\n case '\"' : ch = '\"'; break; \\\n case 'u' : \\\n if (ii+5 > nn) { nn = 0; continue; }; \\\n uint32_t cp = decode_hex((jstr)+ii+1); \\\n ii += 5; \\\n if (is_surrogate(cp)) { \\\n if (nn-ii >= 6 && (jstr)[ii] == '\\\\' && (jstr)[ii+1] == 'u') { \\\n cp = decode_codepoint(cp, 
decode_hex((jstr)+ii+2)); \\\n ii += 6; \\\n } \\\n } \\\n uint8_t _bytes[4]; \\\n int _n = encode_codepoint(_bytes, cp); \\\n for (int _j = 0; _j < _n; _j++) { \\\n ch = _bytes[_j]; \\\n if (1) f \\\n } \\\n ii--; \\\n continue; \\\n default: \\\n continue; \\\n }; \\\n if (1) f \\\n } \\\n}\n\nstatic void arg_append_unescape_str(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n uint8_t *str2 = xmalloc(slen+1);\n for_each_utf8((uint8_t*)str, slen, {\n str2[str2len++] = ch;\n });\n args_append(args, (char*)str2, str2len, false);\n xfree(str2);\n}\n\n// Very simple map to stores all params numbers.\nstruct pmap {\n int count;\n int nbuckets;\n uint16_t *buckets;\n uint16_t def[8];\n};\n\nstatic void pmap_init(struct pmap *map) {\n memset(map, 0, sizeof(struct pmap));\n map->nbuckets = sizeof(map->def)/sizeof(uint16_t);\n map->buckets = map->def;\n}\n\nstatic void pmap_free(struct pmap *map) {\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n}\n\nstatic void pmap_insert0(uint16_t *buckets, int nbuckets, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%nbuckets;\n while (1) {\n if (buckets[i] == 0) {\n buckets[i] = param;\n return;\n }\n i = (i+1)%nbuckets;\n }\n}\n\nstatic void pmap_grow(struct pmap *map) {\n int nbuckets2 = map->nbuckets*2;\n uint16_t *buckets2 = xmalloc(nbuckets2*sizeof(uint16_t));\n memset(buckets2, 0, nbuckets2*sizeof(uint16_t));\n for (int i = 0; i < map->nbuckets; i++) {\n if (map->buckets[i]) {\n pmap_insert0(buckets2, nbuckets2, map->buckets[i]);\n }\n }\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n map->buckets = buckets2;\n map->nbuckets = nbuckets2;\n}\n\nstatic void pmap_insert(struct pmap *map, uint16_t param) {\n assert(param != 0);\n if (map->count == (map->nbuckets>>1)+(map->nbuckets>>2)) {\n pmap_grow(map);\n }\n pmap_insert0(map->buckets, map->nbuckets, param);\n map->count++;\n}\n\nstatic bool pmap_exists(struct pmap *map, uint16_t param) {\n uint16_t hash = 
mix13(param);\n int i = hash%map->nbuckets;\n while (1) {\n if (map->buckets[i] == 0) {\n return false;\n }\n if (map->buckets[i] == param) {\n return true;\n }\n i = (i+1)%map->nbuckets;\n }\n}\n\nstatic bool parse_query_args(const char *query, struct args *args, \n int *nparams, struct buf *argtypes)\n{\n dprintf(\"parse_query: [%s]\\n\", query);\n struct pmap pmap;\n pmap_init(&pmap);\n\n // loop through each keyword\n while (isspace(*query)) {\n query++;\n }\n bool ok = false;\n bool esc = false;\n const char *str;\n const char *p = query;\n bool join = false;\n while (*p) {\n switch (*p) {\n case ';':\n goto break_while;\n case '\\\"':\n // identifier\n parse_errorf(\"idenifiers not allowed\");\n goto done;\n case '\\'':\n // simple string\n p++;\n str = p;\n esc = false;\n while (*p) {\n if (*p == '\\'') {\n if (*(p+1) == '\\'') {\n esc = true;\n p += 2;\n continue;\n }\n break;\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_simplestr(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n case '$':\n // dollar-quote or possible param\n if (*(p+1) >= '0' && *(p+1) <= '9') {\n char *e = 0;\n long param = strtol(p+1, &e, 10);\n if (param == 0 || param > 0xFFFF) {\n parse_errorf(\"there is no parameter $%ld\", param);\n goto done;\n }\n pmap_insert(&pmap, param);\n args_append(args, p, e-p, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'P'+join);\n join = *e && !isspace(*e);\n }\n p = e;\n continue;\n }\n // dollar-quote strings not\n parse_errorf(\"dollar-quote strings not allowed\");\n goto done;\n case 'E': case 'e':\n if (*(p+1) == '\\'') {\n // escaped string\n p += 2;\n str = p;\n while (*p) {\n if (*p == '\\\\') {\n esc = true;\n } else if (*p == '\\'') {\n size_t x = 0;\n while (*(p-x-1) == '\\\\') {\n x++;\n }\n 
if ((x%2)==0) {\n break;\n }\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_str(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n }\n // fallthrough\n default:\n if (isspace(*p)) {\n p++;\n continue;\n }\n // keyword\n const char *keyword = p;\n while (*p && !isspace(*p)) {\n if (*p == ';' || *p == '\\'' || *p == '\\\"' || *p == '$') {\n break;\n }\n p++;\n }\n size_t keywordlen = p-keyword;\n args_append(args, keyword, keywordlen, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *p && !isspace(*p);\n }\n while (isspace(*p)) {\n p++;\n }\n continue;\n }\n p++;\n }\nbreak_while:\n while (*p) {\n if (*p != ';') {\n parse_errorf(\"unexpected characters at end of query\");\n goto done;\n }\n p++;\n }\n ok = true;\ndone:\n if (ok) {\n // check params\n for (int i = 0; i < pmap.count; i++) {\n if (!pmap_exists(&pmap, i+1)) {\n parse_errorf(\"missing parameter $%d\", i+1);\n ok = false;\n break;\n }\n }\n }\n *nparams = pmap.count;\n pmap_free(&pmap);\n if (argtypes) {\n buf_append_byte(argtypes, '\\0');\n }\n return ok;\n}\n\nstatic bool parse_cache_query_args(const char *query, struct args *args,\n int *maxparam, struct buf *argtypes)\n{\n while (isspace(*query)) {\n query++;\n }\n if (!parse_query_args(query, args, maxparam, argtypes)) {\n return false;\n }\n#ifdef PGDEBUG\n args_print(args);\n#endif\n if (argtypes) {\n dprintf(\"argtypes: [%s]\\n\", argtypes->data);\n }\n return true;\n}\n\nstatic size_t parseQ(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Query\n dprintf(\">>> Query\\n\");\n parse_begin();\n const char *query = parse_cstr();\n parse_end();\n int nparams = 0;\n bool pok = parse_cache_query_args(query, args, &nparams, 0);\n if (!pok) {\n pg->error = 1;\n 
args_clear(args);\n return len;\n }\n if (nparams > 0) {\n parse_seterror(\"query cannot have parameters\");\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (args->len == 0) {\n pg->empty_query = 1;\n }\n return len;\n}\n\nstatic size_t parseP(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Parse\n dprintf(\"<<< Parse\\n\");\n // print_packet(data, len);\n parse_begin();\n const char *stmt_name = parse_cstr();\n const char *query = parse_cstr();\n uint16_t num_param_types = parse_int16();\n // dprintf(\". Parse [%s] [%s] [%d]\\n\", stmt_name, query,\n // (int)num_param_types);\n for (uint16_t i = 0; i < num_param_types; i++) {\n int32_t param_type = parse_int32();\n (void)param_type;\n // dprintf(\". [%d]\\n\", param_type);\n }\n parse_end();\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n int nparams = 0;\n struct buf argtypes = { 0 };\n bool ok = parse_cache_query_args(query, args, &nparams, &argtypes);\n if (!ok) {\n pg->error = 1;\n args_clear(args);\n buf_clear(&argtypes);\n return len;\n }\n // copy over last statement\n struct pg_statement stmt = { 0 };\n strcpy(stmt.name, stmt_name);\n stmt.nparams = nparams;\n // copy over parsed args\n for (size_t i = 0; i < args->len; i++) {\n args_append(&stmt.args, args->bufs[i].data, args->bufs[i].len, false);\n }\n args_clear(args);\n stmt.argtypes = argtypes;\n statement_insert(pg, &stmt);\n pg->parse = 1;\n return len;\n}\n\nstatic size_t parseD(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Describe\n dprintf(\"<<< Describe\\n\");\n if (pg->describe) {\n // Already has a describe in a sequence\n pg->error = 1;\n parse_errorf(\"double describe not allowed\");\n return -1;\n }\n // print_packet(data, len);\n parse_begin();\n uint8_t type = parse_byte();\n const char *name = parse_cstr();\n parse_end();\n\n dprintf(\". 
Describe [%c] [%s]\\n\", type, name);\n if (type == 'P' || type == 'P'+1) {\n struct pg_portal portal;\n if (!portal_get(pg, name, &portal)) {\n parse_errorf(\"portal not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('T')\n // Int32 length\n // Int16 field_count\n // Field[] fields\n // all fields are unnamed text\n char field[] = { \n 0x00, // \"\\0\" (field name)\n 0x00, 0x00, 0x00, 0x00, // table_oid = 0\n 0x00, 0x00, // column_attr_no = 0\n 0x00, 0x00, 0x00, pg->oid, // type_oid = 25 (text)\n 0xFF, 0xFF, // type_size = -1\n 0xFF, 0xFF, 0xFF, 0xFF, // type_modifier = -1\n 0x00, 0x00, // format_code = 0 (text)\n };\n static_assert(sizeof(field) == 19, \"\");\n size_t size = 1+4+2+portal.params.len*sizeof(field);\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 'T';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, portal.params.len);\n p1 += 2;\n for (size_t i = 0; i < portal.params.len; i++) {\n memcpy(p1, field, sizeof(field));\n p1 += sizeof(field);\n }\n pg->desclen = size;\n return len;\n }\n\n if (type == 'S') {\n struct pg_statement stmt;\n if (!statement_get(pg, name, &stmt)) {\n parse_errorf(\"statement not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('t')\n // Int32 length\n // Int16 num_params\n // Int32[] param_type_oids\n size_t size = 1+4+2+stmt.nparams*4;\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 't';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, stmt.nparams);\n p1 += 2;\n for (int i = 0; i < stmt.nparams; i++) {\n write_i32(p1, pg->oid);\n p1 += 4;\n }\n pg->desclen = size;\n pg->describe = 1;\n return len;\n }\n parse_errorf(\"unsupported describe type '%c'\", type);\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseB(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n\n // Bind\n dprintf(\"<<< 
Bind\\n\");\n\n // print_packet(data, len);\n\n // X Byte1('B') # Bind message identifier\n // X Int32 length # Message length including self\n //\n // String portal_name # Destination portal (\"\" = unnamed)\n // String statement_name # Prepared statement name (from Parse)\n // Int16 num_format_codes # 0 = all text, 1 = one for all, or N\n // [Int16] format_codes # 0 = text, 1 = binary\n // Int16 num_parameters\n // [parameter values]\n // Int16 num_result_formats\n // [Int16] result_format_codes\n\n parse_begin();\n const char *portal_name = parse_cstr();\n const char *stmt_name = parse_cstr();\n int num_formats = parse_int16();\n for (int i = 0; i < num_formats; i++) {\n int format = parse_int16();\n if (format != 0 && format != 1) {\n parse_errorf(\"only text or binary format allowed\");\n pg->error = 1;\n return len;\n }\n }\n uint16_t num_params = parse_int16();\n args_clear(&pg->targs);\n for (int i = 0; i < num_params; i++) {\n int32_t len = parse_int32();\n if (len <= 0) {\n // Nulls are empty strings\n len = 0;\n }\n const char *b = parse_bytes(len);\n args_append(&pg->targs, b, len, false);\n }\n // ignore result formats\n uint16_t num_result_formats = parse_int16();\n for (int i = 0; i < num_result_formats; i++) {\n int result_format_codes = parse_int16();\n (void)result_format_codes;\n }\n parse_end();\n\n if (strlen(portal_name) >= PGNAMEDATALEN) {\n parse_seterror(\"portal name too large\");\n pg->error = 1;\n return len;\n }\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n struct pg_portal portal = { 0 };\n strcpy(portal.name, portal_name);\n strcpy(portal.stmt, stmt_name);\n memcpy(&portal.params, &pg->targs, sizeof(struct args));\n memset(&pg->targs, 0, sizeof(struct args));\n portal_insert(pg, &portal);\n pg->bind = 1;\n return len;\n}\n\nstatic size_t parseX(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Close\n 
dprintf(\"<<< Close\\n\");\n parse_begin();\n parse_end();\n pg->close = 1;\n return len;\n}\n\nstatic size_t parseE(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Execute\n dprintf(\"<<< Execute\\n\");\n parse_begin();\n const char *portal_name = parse_cstr();\n size_t max_rows = parse_int32();\n parse_end();\n struct pg_portal portal;\n if (!portal_get(pg, portal_name, &portal)) {\n parse_seterror(\"portal not found\");\n pg->error = 1;\n return len;\n }\n struct pg_statement stmt;\n if (!statement_get(pg, portal.stmt, &stmt)) {\n parse_seterror(\"statement not found\");\n pg->error = 1;\n return len;\n }\n if ((size_t)stmt.nparams != portal.params.len) {\n parse_seterror(\"portal params mismatch\");\n pg->error = 1;\n return len;\n }\n // ignore max_rows\n (void)max_rows;\n\n // \n args_clear(&pg->targs);\n for (size_t i = 0; i < stmt.args.len; i++) {\n const char *arg = stmt.args.bufs[i].data;\n size_t arglen = stmt.args.bufs[i].len;\n char atype = stmt.argtypes.data[i];\n dprintf(\"[%.*s] [%c]\\n\", (int)arglen, arg, atype);\n bool join = false;\n switch (atype) {\n case 'A'+1:\n atype = 'A';\n join = true;\n break;\n case 'P':\n join = false;\n break;\n case 'P'+1:\n atype = 'P';\n join = true;\n break;\n }\n if (atype == 'P') {\n if (arglen == 0 || arg[0] != '$') {\n goto internal_error;\n }\n uint64_t x;\n bool ok = parse_u64(arg+1, arglen-1, &x);\n if (!ok || x == 0 || x > 0xFFFF) {\n goto internal_error;\n }\n size_t paramidx = x-1;\n if (paramidx >= portal.params.len) {\n goto internal_error;\n }\n arg = portal.params.bufs[paramidx].data;\n arglen = portal.params.bufs[paramidx].len;\n }\n if (join) {\n assert(pg->targs.len > 0);\n buf_append(&pg->targs.bufs[pg->targs.len-1], arg, arglen);\n } else {\n args_append(&pg->targs, arg, arglen, false);\n }\n }\n\n struct args swapargs = *args;\n *args = pg->targs;\n pg->targs = swapargs;\n\n#ifdef PGDEBUG\n args_print(args);\n#endif\n\n pg->execute = 1;\n 
return len;\ninternal_error:\n parse_seterror(\"portal params internal error\");\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseS(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args;\n // Sync\n dprintf(\"<<< Sync\\n\");\n // print_packet(data, len);\n parse_begin();\n parse_end();\n pg->sync = 1;\n return len;\n}\n\nstatic size_t parsep(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // PasswordMessage\n parse_begin();\n const char *password = parse_cstr();\n parse_end();\n if (strcmp(password, auth) != 0) {\n parse_seterror(\n \"WRONGPASS invalid username-password pair or user is disabled.\");\n return -1;\n }\n pg->auth = 1;\n return len;\n}\n\nstatic ssize_t parse_message(const char *data, size_t len, struct args *args,\n struct pg *pg)\n{\n if (len < 5) {\n return 0;\n }\n int msgbyte = data[0];\n size_t msglen = read_i32(data+1);\n if (len < msglen+1) {\n return 0;\n }\n msglen -= 4;\n data += 5;\n ssize_t ret;\n switch (msgbyte) {\n case 'Q':\n ret = parseQ(data, msglen, args, pg);\n break;\n case 'P':\n ret = parseP(data, msglen, args, pg);\n break;\n case 'X':\n ret = parseX(data, msglen, args, pg);\n break;\n case 'E':\n ret = parseE(data, msglen, args, pg);\n break;\n case 'p': // lowercase\n ret = parsep(data, msglen, args, pg);\n break;\n case 'D':\n ret = parseD(data, msglen, args, pg);\n break;\n case 'B':\n ret = parseB(data, msglen, args, pg);\n break;\n case 'S':\n ret = parseS(data, msglen, args, pg);\n break;\n default:\n pg->error = 1;\n parse_errorf(\"unknown message '%c'\", msgbyte);\n ret = msglen;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+5;\n}\n\nstatic ssize_t parse_magic_ssl(const char *data, size_t len, struct pg *pg) {\n (void)data;\n // SSLRequest\n pg->ssl = 1;\n return len;\n}\n\nstatic ssize_t parse_magic_proto3(const char *data, size_t len, struct pg *pg) {\n // StartupMessage\n const char *p = (void*)data;\n const char *e = 
p+len;\n // Read parameters\n const char *user = \"\";\n const char *database = \"\";\n const char *application_name = \"\";\n const char *client_encoding = \"\";\n const char *name = 0;\n const char *s = (char*)p;\n while (p < e) {\n if (*p == '\\0') {\n if (s != p) {\n if (name) {\n if (strcmp(name, \"database\") == 0) {\n database = s;\n } else if (strcmp(name, \"application_name\") == 0) {\n application_name = s;\n } else if (strcmp(name, \"client_encoding\") == 0) {\n client_encoding = s;\n } else if (strcmp(name, \"user\") == 0) {\n user = s;\n }\n name = 0;\n } else {\n name = s;\n }\n }\n s = p+1;\n }\n p++;\n }\n // dprintf(\". database=%s, application_name=%s, client_encoding=%s, \"\n // \"user=%s\\n\", database, application_name, client_encoding, user);\n if (*client_encoding && strcmp(client_encoding, \"UTF8\") != 0) {\n printf(\"# Invalid Postgres client_encoding (%s)\\n\",\n client_encoding);\n return -1;\n }\n pg->user = xmalloc(strlen(user)+1);\n strcpy((char*)pg->user, user);\n pg->database = xmalloc(strlen(database)+1);\n strcpy((char*)pg->database, database);\n pg->application_name = xmalloc(strlen(application_name)+1);\n strcpy((char*)pg->application_name, application_name);\n pg->startup = 1;\n return p-data;\n}\n\nstatic ssize_t parse_magic_cancel(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n parse_errorf(\"cancel message unsupported\");\n return -1;\n}\n\nstatic ssize_t parse_magic(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n if (len < 4) {\n return 0;\n }\n size_t msglen = read_i32(data);\n if (msglen > 65536) {\n parse_errorf(\"message too large\");\n return -1;\n }\n if (len < msglen) {\n return 0;\n }\n if (msglen < 8) {\n parse_errorf(\"invalid message\");\n return -1;\n }\n // dprintf(\"parse_magic\\n\");\n uint32_t magic = read_i32(data+4);\n data += 8;\n msglen -= 8;\n ssize_t ret;\n switch (magic) {\n case 0x04D2162F: \n ret = parse_magic_ssl(data, msglen, 
pg);\n break;\n case 0x00030000: \n ret = parse_magic_proto3(data, msglen, pg);\n break;\n case 0xFFFF0000: \n ret = parse_magic_cancel(data, msglen, pg);\n break;\n default:\n parse_errorf(\"Protocol error: unknown magic number %08x\", magic);\n ret = -1;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+8;\n}\n\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pgptr)\n{\n (void)print_packet;\n // print_packet(data, len);\n struct pg *pg = *pgptr;\n if (!pg) {\n pg = pg_new();\n *pgptr = pg;\n }\n pg->error = 0;\n if (len == 0) {\n return 0;\n }\n if (data[0] == 0) {\n return parse_magic(data, len, pg);\n }\n return parse_message(data, len, args, pg);\n}\n\nvoid pg_write_auth(struct conn *conn, unsigned char code) {\n unsigned char bytes[] = { \n 'R', 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n}\n\nvoid pg_write_ready(struct conn *conn, unsigned char code) {\n if (!pg_execute(conn)) {\n unsigned char bytes[] = { \n 'Z', 0x0, 0x0, 0x0, 0x5, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n }\n}\n\nvoid pg_write_status(struct conn *conn, const char *key, const char *val) {\n size_t keylen = strlen(key);\n size_t vallen = strlen(val);\n int32_t size = 4+keylen+1+vallen+1;\n char *bytes = xmalloc(1+size);\n bytes[0] = 'S';\n write_i32(bytes+1, size);\n memcpy(bytes+1+4,key,keylen+1);\n memcpy(bytes+1+4+keylen+1,val,vallen+1);\n conn_write_raw(conn, bytes, 1+size);\n xfree(bytes);\n}\n\nvoid pg_write_row_desc(struct conn *conn, const char **fields, int nfields){\n size_t size = 1+4+2;\n for (int i = 0; i < nfields; i++) {\n size += strlen(fields[i])+1;\n size += 4+2+4+2+4+2;\n }\n int oid = conn_pg(conn)->oid;\n char *bytes = xmalloc(size);\n bytes[0] = 'T';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, nfields); // field_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < nfields; i++) {\n size_t fsize = 
strlen(fields[i]);\n memcpy(p, fields[i], fsize+1);\n p += fsize+1;\n write_i32(p, 0); // table_oid\n p += 4;\n write_i16(p, 0); // column_attr_number\n p += 2;\n write_i32(p, oid); // type_oid\n p += 4;\n write_i16(p, -1); // type_size\n p += 2;\n write_i32(p, -1); // type_modifier\n p += 4;\n write_i16(p, 1); // format_code\n p += 2;\n }\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_row_data(struct conn *conn, const char **cols, \n const size_t *collens, int ncols)\n{\n size_t size = 1+4+2;\n for (int i = 0; i < ncols; i++) {\n size += 4+collens[i];\n }\n char *bytes = xmalloc(size);\n bytes[0] = 'D';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, ncols); // column_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < ncols; i++) {\n write_i32(p, collens[i]); // column_length\n p += 4;\n#ifdef PGDEBUG\n printf(\" ROW >>>> len:%zu [\", collens[i]);\n binprint(cols[i], collens[i]);\n printf(\"]\\n\");\n#endif\n memcpy(p, cols[i], collens[i]); // column_data\n p += collens[i];\n }\n \n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_complete(struct conn *conn, const char *tag){\n size_t taglen = strlen(tag);\n size_t size = 1+4+taglen+1;\n char *bytes = xmalloc(size);\n bytes[0] = 'C';\n write_i32(bytes+1, size-1); // message_size\n memcpy(bytes+1+4, tag, taglen+1);\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_completef(struct conn *conn, const char *tag_format, ...){\n // initializing list pointer\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_complete(conn, tag);\n}\n\nvoid pg_write_simple_row_data_ready(struct conn *conn, const char *desc,\n const void *row, size_t len, const char *tag)\n{\n pg_write_row_desc(conn, (const char*[]){ desc }, 1);\n pg_write_row_data(conn, (const char*[]){ row }, (size_t[]){ len }, 1);\n pg_write_complete(conn, tag);\n pg_write_ready(conn, 
'I');\n}\n\nvoid pg_write_simple_row_str_ready(struct conn *conn, const char *desc,\n const char *row, const char *tag)\n{\n pg_write_simple_row_data_ready(conn, desc, row, strlen(row), tag);\n}\n\nvoid pg_write_simple_row_i64_ready(struct conn *conn, const char *desc,\n int64_t row, const char *tag)\n{\n char val[32];\n snprintf(val, sizeof(val), \"%\" PRIi64, row);\n pg_write_simple_row_str_ready(conn, desc, val, tag);\n}\n\nvoid pg_write_simple_row_str_readyf(struct conn *conn, const char *desc,\n const char *row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_str_ready(conn, desc, row, tag);\n}\n\nvoid pg_write_simple_row_i64_readyf(struct conn *conn, const char *desc,\n int64_t row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_i64_ready(conn, desc, row, tag);\n}\n\nstatic void write_auth_ok(struct conn *conn, struct pg *pg) {\n // dprintf(\">> AuthOK\\n\");\n pg_write_auth(conn, 0); // AuthOK;\n // startup message received, respond\n pg_write_status(conn, \"client_encoding\", \"UTF8\");\n pg_write_status(conn, \"server_encoding\", \"UTF8\");\n char status[128];\n snprintf(status, sizeof(status), \"%s (Pogocache)\", version);\n pg_write_status(conn, \"server_version\", status);\n pg_write_ready(conn, 'I'); // Idle;\n pg->ready = 1;\n}\n\n// Respond to various the connection states.\n// Returns true if the all responses complete or false if there was an\n// error.\nbool pg_respond(struct conn *conn, struct pg *pg) {\n if (pg->error) {\n conn_write_error(conn, parse_lasterror());\n return true;\n }\n if (pg->empty_query) {\n dprintf(\"====== pg_respond(pg->empty_query) =====\\n\");\n conn_write_raw(conn, \"I\\0\\0\\0\\4\", 5);\n conn_write_raw(conn, \"Z\\0\\0\\0\\5I\", 6);\n pg->empty_query = 0;\n return 
true;\n }\n if (pg->parse) {\n dprintf(\"====== pg_respond(pg->parse) =====\\n\");\n conn_write_raw(conn, \"1\\0\\0\\0\\4\", 5);\n pg->parse = 0;\n return true;\n }\n if (pg->bind) {\n dprintf(\"====== pg_respond(pg->bind) =====\\n\");\n conn_write_raw(conn, \"2\\0\\0\\0\\4\", 5);\n pg->bind = 0;\n return true;\n }\n if (pg->describe) {\n dprintf(\"====== pg_respond(pg->describe) =====\\n\");\n assert(pg->desc);\n conn_write_raw(conn, pg->desc, pg->desclen);\n xfree(pg->desc);\n pg->desc = 0;\n pg->desclen = 0;\n pg->describe = 0;\n return true;\n }\n if (pg->sync) {\n dprintf(\"====== pg_respond(pg->sync) =====\\n\");\n pg->execute = 0;\n pg_write_ready(conn, 'I');\n pg->sync = 0;\n return true;\n }\n if (pg->close) {\n dprintf(\"====== pg_respond(pg->close) =====\\n\");\n pg->close = 0;\n return false;\n }\n if (pg->ssl == 1) {\n if (!conn_istls(conn)) {\n conn_write_raw_cstr(conn, \"N\");\n } else {\n conn_write_raw_cstr(conn, \"Y\");\n }\n pg->ssl = 0;\n return true;\n }\n if (pg->auth == 1) {\n if (pg->startup == 0) {\n return false;\n }\n conn_setauth(conn, true);\n write_auth_ok(conn, pg);\n pg->auth = 0;\n return true;\n }\n if (pg->startup == 1) {\n if (auth && *auth) {\n pg_write_auth(conn, 3); // AuthenticationCleartextPassword;\n } else {\n write_auth_ok(conn, pg);\n pg->startup = 0;\n }\n return true;\n }\n return true;\n}\n\nvoid pg_write_error(struct conn *conn, const char *msg) {\n size_t msglen = strlen(msg);\n size_t size = 1+4;\n size += 1+5+1; // 'S' \"ERROR\" \\0\n size += 1+5+1; // 'V' \"ERROR\" \\0\n size += 1+5+1; // 'C' \"23505\" \\0\n size += 1+msglen+1; // 'M' msg \\0\n size += 1; // null-terminator\n char *bytes = xmalloc(size);\n bytes[0] = 'E';\n write_i32(bytes+1, size-1);\n char *p = bytes+1+4;\n memcpy(p, \"SERROR\", 7);\n p += 7;\n memcpy(p, \"VERROR\", 7);\n p += 7;\n memcpy(p, \"C23505\", 7);\n p += 7;\n p[0] = 'M';\n p++;\n memcpy(p, msg, msglen+1);\n p += msglen+1;\n p[0] = '\\0';\n conn_write_raw(conn, bytes, size);\n 
xfree(bytes);\n}\n\n// return true if the command need further execution, of false if this\n// operation handled it already\nbool pg_precommand(struct conn *conn, struct args *args, struct pg *pg) {\n#ifdef PGDEBUG\n printf(\"precommand: \");\n args_print(args);\n#endif\n if (args->len > 0 && args->bufs[0].len > 0) {\n char c = tolower(args->bufs[0].data[0]);\n if (c == 'b' || c == 'r' || c == 'c') {\n // silently ignore transaction commands.\n if (c == 'b' && argeq(args, 0, \"begin\")) {\n pg_write_completef(conn, \"BEGIN\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"rollback\")) {\n pg_write_completef(conn, \"ROLLBACK\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"commit\")) {\n pg_write_completef(conn, \"COMMIT\");\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n if (c == ':' && args->bufs[0].len > 1 && args->bufs[0].data[1] == ':') {\n if (argeq(args, 0, \"::bytea\") || argeq(args, 0, \"::bytes\")) {\n pg->oid = BYTEAOID;\n } else if (argeq(args, 0, \"::text\")) {\n pg->oid = TEXTOID;\n } else {\n char err[128];\n snprintf(err, sizeof(err), \"unknown type '%.*s'\", \n (int)(args->bufs[0].len-2), args->bufs[0].data+2);\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n return false;\n }\n args_remove_first(args);\n if (args->len == 0) {\n if (pg->oid == BYTEAOID) {\n pg_write_completef(conn, \"BYTEA\");\n } else {\n pg_write_completef(conn, \"TEXT\");\n }\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n }\n return true;\n}\n"], ["/pogocache/src/lz4.c", "/*\n LZ4 - Fast LZ compression algorithm\n Copyright (C) 2011-2023, Yann Collet.\n\n BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following 
disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following disclaimer\n in the documentation and/or other materials provided with the\n distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n You can contact the author at :\n - LZ4 homepage : http://www.lz4.org\n - LZ4 source repository : https://github.com/lz4/lz4\n*/\n\n/*-************************************\n* Tuning parameters\n**************************************/\n/*\n * LZ4_HEAPMODE :\n * Select how stateless compression functions like `LZ4_compress_default()`\n * allocate memory for their hash table,\n * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).\n */\n#ifndef LZ4_HEAPMODE\n# define LZ4_HEAPMODE 0\n#endif\n\n/*\n * LZ4_ACCELERATION_DEFAULT :\n * Select \"acceleration\" for LZ4_compress_fast() when parameter value <= 0\n */\n#define LZ4_ACCELERATION_DEFAULT 1\n/*\n * LZ4_ACCELERATION_MAX :\n * Any \"acceleration\" value higher than this threshold\n * get treated as LZ4_ACCELERATION_MAX instead (fix #876)\n */\n#define LZ4_ACCELERATION_MAX 65537\n\n\n/*-************************************\n* CPU Feature Detection\n**************************************/\n/* 
LZ4_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n * It can generate buggy code on targets which assembly generation depends on alignment.\n * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */\n# if defined(__GNUC__) && \\\n ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \\\n || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n# define LZ4_FORCE_MEMORY_ACCESS 2\n# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)\n# define LZ4_FORCE_MEMORY_ACCESS 1\n# endif\n#endif\n\n/*\n * LZ4_FORCE_SW_BITCOUNT\n * Define this parameter if your target system or compiler does not support hardware bit count\n */\n#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */\n# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */\n# define LZ4_FORCE_SW_BITCOUNT\n#endif\n\n\n\n/*-************************************\n* Dependency\n**************************************/\n/*\n * LZ4_SRC_INCLUDED:\n * Amalgamation flag, whether lz4.c is included\n */\n#ifndef LZ4_SRC_INCLUDED\n# define 
LZ4_SRC_INCLUDED 1\n#endif\n\n#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS\n# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */\n#endif\n\n#ifndef LZ4_STATIC_LINKING_ONLY\n# define LZ4_STATIC_LINKING_ONLY\n#endif\n#include \"lz4.h\"\n/* see also \"memory routines\" below */\n\n\n/*-************************************\n* Compiler Options\n**************************************/\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */\n# include /* only present in VS2005+ */\n# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */\n# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */\n# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */\n#endif /* _MSC_VER */\n\n#ifndef LZ4_FORCE_INLINE\n# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */\n# define LZ4_FORCE_INLINE static __forceinline\n# else\n# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */\n# if defined (__GNUC__) || defined (__clang__)\n# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))\n# else\n# define LZ4_FORCE_INLINE static inline\n# endif\n# else\n# define LZ4_FORCE_INLINE static\n# endif /* __STDC_VERSION__ */\n# endif /* _MSC_VER */\n#endif /* LZ4_FORCE_INLINE */\n\n/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE\n * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,\n * together with a simple 8-byte copy loop as a fall-back path.\n * However, this optimization hurts the decompression speed by >30%,\n * because the execution does not go to the optimized loop\n * for typical compressible data, and all of the preamble checks\n * before going to the fall-back path 
become useless overhead.\n * This optimization happens only with the -O3 flag, and -O2 generates\n * a simple 8-byte copy loop.\n * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8\n * functions are annotated with __attribute__((optimize(\"O2\"))),\n * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute\n * of LZ4_wildCopy8 does not affect the compression speed.\n */\n#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)\n# define LZ4_FORCE_O2 __attribute__((optimize(\"O2\")))\n# undef LZ4_FORCE_INLINE\n# define LZ4_FORCE_INLINE static __inline __attribute__((optimize(\"O2\"),always_inline))\n#else\n# define LZ4_FORCE_O2\n#endif\n\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)\n# define expect(expr,value) (__builtin_expect ((expr),(value)) )\n#else\n# define expect(expr,value) (expr)\n#endif\n\n#ifndef likely\n#define likely(expr) expect((expr) != 0, 1)\n#endif\n#ifndef unlikely\n#define unlikely(expr) expect((expr) != 0, 0)\n#endif\n\n/* Should the alignment test prove unreliable, for some reason,\n * it can be disabled by setting LZ4_ALIGN_TEST to 0 */\n#ifndef LZ4_ALIGN_TEST /* can be externally provided */\n# define LZ4_ALIGN_TEST 1\n#endif\n\n\n/*-************************************\n* Memory routines\n**************************************/\n\n/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :\n * Disable relatively high-level LZ4/HC functions that use dynamic memory\n * allocation functions (malloc(), calloc(), free()).\n *\n * Note that this is a compile-time switch. 
And since it disables\n * public/stable LZ4 v1 API functions, we don't recommend using this\n * symbol to generate a library for distribution.\n *\n * The following public functions are removed when this symbol is defined.\n * - lz4 : LZ4_createStream, LZ4_freeStream,\n * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)\n * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,\n * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)\n * - lz4frame, lz4file : All LZ4F_* functions\n */\n#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\n# define ALLOC(s) lz4_error_memory_allocation_is_disabled\n# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled\n# define FREEMEM(p) lz4_error_memory_allocation_is_disabled\n#elif defined(LZ4_USER_MEMORY_FUNCTIONS)\n/* memory management functions can be customized by user project.\n * Below functions must exist somewhere in the Project\n * and be available at link time */\nvoid* LZ4_malloc(size_t s);\nvoid* LZ4_calloc(size_t n, size_t s);\nvoid LZ4_free(void* p);\n# define ALLOC(s) LZ4_malloc(s)\n# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)\n# define FREEMEM(p) LZ4_free(p)\n#else\n# include /* malloc, calloc, free */\n# define ALLOC(s) malloc(s)\n# define ALLOC_AND_ZERO(s) calloc(1,s)\n# define FREEMEM(p) free(p)\n#endif\n\n#if ! 
LZ4_FREESTANDING\n# include /* memset, memcpy */\n#endif\n#if !defined(LZ4_memset)\n# define LZ4_memset(p,v,s) memset((p),(v),(s))\n#endif\n#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))\n\n\n/*-************************************\n* Common Constants\n**************************************/\n#define MINMATCH 4\n\n#define WILDCOPYLENGTH 8\n#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */\n#define FASTLOOP_SAFE_DISTANCE 64\nstatic const int LZ4_minLength = (MFLIMIT+1);\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define LZ4_DISTANCE_ABSOLUTE_MAX 65535\n#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */\n# error \"LZ4_DISTANCE_MAX is too big : must be <= 65535\"\n#endif\n\n#define ML_BITS 4\n#define ML_MASK ((1U<=1)\n# include \n#else\n# ifndef assert\n# define assert(condition) ((void)0)\n# endif\n#endif\n\n#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */\n\n#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)\n# include \n static int g_debuglog_enable = 1;\n# define DEBUGLOG(l, ...) { \\\n if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \\\n fprintf(stderr, __FILE__ \" %i: \", __LINE__); \\\n fprintf(stderr, __VA_ARGS__); \\\n fprintf(stderr, \" \\n\"); \\\n } }\n#else\n# define DEBUGLOG(l, ...) 
{} /* disabled */\n#endif\n\nstatic int LZ4_isAligned(const void* ptr, size_t alignment)\n{\n return ((size_t)ptr & (alignment -1)) == 0;\n}\n\n\n/*-************************************\n* Types\n**************************************/\n#include \n#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include \n typedef uint8_t BYTE;\n typedef uint16_t U16;\n typedef uint32_t U32;\n typedef int32_t S32;\n typedef uint64_t U64;\n typedef uintptr_t uptrval;\n#else\n# if UINT_MAX != 4294967295UL\n# error \"LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4\"\n# endif\n typedef unsigned char BYTE;\n typedef unsigned short U16;\n typedef unsigned int U32;\n typedef signed int S32;\n typedef unsigned long long U64;\n typedef size_t uptrval; /* generally true, except OpenVMS-64 */\n#endif\n\n#if defined(__x86_64__)\n typedef U64 reg_t; /* 64-bits in x32 mode */\n#else\n typedef size_t reg_t; /* 32-bits in x32 mode */\n#endif\n\ntypedef enum {\n notLimited = 0,\n limitedOutput = 1,\n fillOutput = 2\n} limitedOutput_directive;\n\n\n/*-************************************\n* Reading and writing into memory\n**************************************/\n\n/**\n * LZ4 relies on memcpy with a constant size being inlined. In freestanding\n * environments, the compiler can't assume the implementation of memcpy() is\n * standard compliant, so it can't apply its specialized memcpy() inlining\n * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze\n * memcpy() as if it were standard compliant, so it can inline it in freestanding\n * environments. 
This is needed when decompressing the Linux Kernel, for example.\n */\n#if !defined(LZ4_memcpy)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)\n# else\n# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)\n# endif\n#endif\n\n#if !defined(LZ4_memmove)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memmove __builtin_memmove\n# else\n# define LZ4_memmove memmove\n# endif\n#endif\n\nstatic unsigned LZ4_isLittleEndian(void)\n{\n const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */\n return one.c[0];\n}\n\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))\n#elif defined(_MSC_VER)\n#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))\n#endif\n\n#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)\n/* lie to the compiler about data alignment; use with caution */\n\nstatic U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\n\n#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\nLZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;\nLZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;\nLZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;\n\nstatic U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }\nstatic U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; 
}\nstatic reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }\n\n#else /* safe and portable access using memcpy() */\n\nstatic U16 LZ4_read16(const void* memPtr)\n{\n U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 LZ4_read32(const void* memPtr)\n{\n U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic reg_t LZ4_read_ARCH(const void* memPtr)\n{\n reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic void LZ4_write16(void* memPtr, U16 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\nstatic void LZ4_write32(void* memPtr, U32 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* LZ4_FORCE_MEMORY_ACCESS */\n\n\nstatic U16 LZ4_readLE16(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read16(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U16)((U16)p[0] | (p[1]<<8));\n }\n}\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\nstatic U32 LZ4_readLE32(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read32(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);\n }\n}\n#endif\n\nstatic void LZ4_writeLE16(void* memPtr, U16 value)\n{\n if (LZ4_isLittleEndian()) {\n LZ4_write16(memPtr, value);\n } else {\n BYTE* p = (BYTE*)memPtr;\n p[0] = (BYTE) value;\n p[1] = (BYTE)(value>>8);\n }\n}\n\n/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */\nLZ4_FORCE_INLINE\nvoid LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/\nLZ4_FORCE_INLINE void\nLZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH\n * - there is at least 12 bytes available to write after dstEnd */\nLZ4_FORCE_INLINE void\nLZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)\n{\n BYTE v[8];\n\n assert(dstEnd >= dstPtr + MINMATCH);\n\n switch(offset) {\n case 1:\n MEM_INIT(v, *srcPtr, 8);\n break;\n case 2:\n LZ4_memcpy(v, srcPtr, 2);\n LZ4_memcpy(&v[2], srcPtr, 2);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(push)\n# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */\n#endif\n LZ4_memcpy(&v[4], v, 4);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(pop)\n#endif\n break;\n case 4:\n LZ4_memcpy(v, srcPtr, 4);\n LZ4_memcpy(&v[4], srcPtr, 4);\n break;\n default:\n LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);\n return;\n }\n\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n while (dstPtr < dstEnd) {\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n }\n}\n#endif\n\n\n/*-************************************\n* Common functions\n**************************************/\nstatic unsigned LZ4_NbCommonBytes (reg_t val)\n{\n assert(val != 0);\n if (LZ4_isLittleEndian()) {\n if (sizeof(val) == 8) {\n# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)\n/*-*************************************************************************************************\n* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.\n* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics\n* including _tzcnt_u64. 
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.\n****************************************************************************************************/\n# if defined(__clang__) && (__clang_major__ < 10)\n /* Avoid undefined clang-cl intrinsics issue.\n * See https://github.com/lz4/lz4/pull/1017 for details. */\n return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;\n# else\n /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */\n return (unsigned)_tzcnt_u64(val) >> 3;\n# endif\n# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r = 0;\n _BitScanForward64(&r, (U64)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctzll((U64)val) >> 3;\n# else\n const U64 m = 0x0101010101010101ULL;\n val ^= val - 1;\n return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);\n# endif\n } else /* 32 bits */ {\n# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r;\n _BitScanForward(&r, (U32)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctz((U32)val) >> 3;\n# else\n const U32 m = 0x01010101;\n return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;\n# endif\n }\n } else /* Big Endian CPU */ {\n if (sizeof(val)==8) {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clzll((U64)val) >> 3;\n# else\n#if 1\n /* this method is probably faster,\n * but adds a 128 bytes lookup table */\n static const unsigned char ctz7_tab[128] = {\n 7, 0, 1, 0, 2, 
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n };\n U64 const mask = 0x0101010101010101ULL;\n U64 const t = (((val >> 8) - mask) | val) & mask;\n return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];\n#else\n /* this method doesn't consume memory space like the previous one,\n * but it contains several branches,\n * that may end up slowing execution */\n static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.\n Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.\n Note that this code path is never triggered in 32-bits mode. */\n unsigned r;\n if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }\n if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n r += (!val);\n return r;\n#endif\n# endif\n } else /* 32 bits */ {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clz((U32)val) >> 3;\n# else\n val >>= 8;\n val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |\n (val + 0x00FF0000)) >> 24;\n return (unsigned)val ^ 3;\n# endif\n }\n }\n}\n\n\n#define STEPSIZE sizeof(reg_t)\nLZ4_FORCE_INLINE\nunsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)\n{\n const BYTE* const pStart = pIn;\n\n if (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) {\n pIn+=STEPSIZE; pMatch+=STEPSIZE;\n } else {\n return LZ4_NbCommonBytes(diff);\n } }\n\n while (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) { 
pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }\n pIn += LZ4_NbCommonBytes(diff);\n return (unsigned)(pIn - pStart);\n }\n\n if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }\n if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }\n if ((pIn compression run slower on incompressible data */\n\n\n/*-************************************\n* Local Structures and types\n**************************************/\ntypedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;\n\n/**\n * This enum distinguishes several different modes of accessing previous\n * content in the stream.\n *\n * - noDict : There is no preceding content.\n * - withPrefix64k : Table entries up to ctx->dictSize before the current blob\n * blob being compressed are valid and refer to the preceding\n * content (of length ctx->dictSize), which is available\n * contiguously preceding in memory the content currently\n * being compressed.\n * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere\n * else in memory, starting at ctx->dictionary with length\n * ctx->dictSize.\n * - usingDictCtx : Everything concerning the preceding content is\n * in a separate context, pointed to by ctx->dictCtx.\n * ctx->dictionary, ctx->dictSize, and table entries\n * in the current context that refer to positions\n * preceding the beginning of the current compression are\n * ignored. 
Instead, ctx->dictCtx->dictionary and ctx->dictCtx\n * ->dictSize describe the location and size of the preceding\n * content, and matches are found by looking in the ctx\n * ->dictCtx->hashTable.\n */\ntypedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;\ntypedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;\n\n\n/*-************************************\n* Local Utils\n**************************************/\nint LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }\nconst char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }\nint LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }\nint LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }\n\n\n/*-****************************************\n* Internal Definitions, used only in Tests\n*******************************************/\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);\n\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize);\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize);\n#if defined (__cplusplus)\n}\n#endif\n\n/*-******************************\n* Compression functions\n********************************/\nLZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)\n{\n if (tableType == byU16)\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));\n else\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)\n{\n const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG+1 : LZ4_HASHLOG;\n if (LZ4_isLittleEndian()) {\n const U64 prime5bytes = 889523592379ULL;\n return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));\n } else {\n const U64 prime8bytes = 11400714785074694791ULL;\n return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));\n }\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)\n{\n if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\n return LZ4_hash4(LZ4_readLE32(p), tableType);\n#else\n return LZ4_hash4(LZ4_read32(p), tableType);\n#endif\n}\n\nLZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: { /* illegal! */ assert(0); return; }\n case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }\n }\n}\n\nLZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: /* fallthrough */\n case byPtr: { /* illegal! 
*/ assert(0); return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }\n }\n}\n\n/* LZ4_putPosition*() : only used in byPtr mode */\nLZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,\n void* tableBase, tableType_t const tableType)\n{\n const BYTE** const hashTable = (const BYTE**)tableBase;\n assert(tableType == byPtr); (void)tableType;\n hashTable[h] = p;\n}\n\nLZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n LZ4_putPositionOnHash(p, h, tableBase, tableType);\n}\n\n/* LZ4_getIndexOnHash() :\n * Index of match position registered in hash table.\n * hash position must be calculated by using base+index, or dictBase+index.\n * Assumption 1 : only valid if tableType == byU32 or byU16.\n * Assumption 2 : h is presumed valid (within limits of hash table)\n */\nLZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);\n if (tableType == byU32) {\n const U32* const hashTable = (const U32*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-2)));\n return hashTable[h];\n }\n if (tableType == byU16) {\n const U16* const hashTable = (const U16*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-1)));\n return hashTable[h];\n }\n assert(0); return 0; /* forbidden case */\n}\n\nstatic const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n assert(tableType == byPtr); (void)tableType;\n { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }\n}\n\nLZ4_FORCE_INLINE const BYTE*\nLZ4_getPosition(const BYTE* p,\n const void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n return LZ4_getPositionOnHash(h, tableBase, tableType);\n}\n\nLZ4_FORCE_INLINE 
void\nLZ4_prepareTable(LZ4_stream_t_internal* const cctx,\n const int inputSize,\n const tableType_t tableType) {\n /* If the table hasn't been used, it's guaranteed to be zeroed out, and is\n * therefore safe to use no matter what mode we're in. Otherwise, we figure\n * out if it's safe to leave as is or whether it needs to be reset.\n */\n if ((tableType_t)cctx->tableType != clearedTable) {\n assert(inputSize >= 0);\n if ((tableType_t)cctx->tableType != tableType\n || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)\n || ((tableType == byU32) && cctx->currentOffset > 1 GB)\n || tableType == byPtr\n || inputSize >= 4 KB)\n {\n DEBUGLOG(4, \"LZ4_prepareTable: Resetting table in %p\", cctx);\n MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);\n cctx->currentOffset = 0;\n cctx->tableType = (U32)clearedTable;\n } else {\n DEBUGLOG(4, \"LZ4_prepareTable: Re-use hash table (no reset)\");\n }\n }\n\n /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,\n * is faster than compressing without a gap.\n * However, compressing with currentOffset == 0 is faster still,\n * so we preserve that case.\n */\n if (cctx->currentOffset != 0 && tableType == byU32) {\n DEBUGLOG(5, \"LZ4_prepareTable: adding 64KB to currentOffset\");\n cctx->currentOffset += 64 KB;\n }\n\n /* Finally, clear history */\n cctx->dictCtx = NULL;\n cctx->dictionary = NULL;\n cctx->dictSize = 0;\n}\n\n/** LZ4_compress_generic_validated() :\n * inlined, to ensure branches are decided at compilation time.\n * The following conditions are presumed already validated:\n * - source != NULL\n * - inputSize > 0\n */\nLZ4_FORCE_INLINE int LZ4_compress_generic_validated(\n LZ4_stream_t_internal* const cctx,\n const char* const source,\n char* const dest,\n const int inputSize,\n int* inputConsumed, /* only written when outputDirective == fillOutput */\n const int maxOutputSize,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const 
dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n int result;\n const BYTE* ip = (const BYTE*)source;\n\n U32 const startIndex = cctx->currentOffset;\n const BYTE* base = (const BYTE*)source - startIndex;\n const BYTE* lowLimit;\n\n const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;\n const BYTE* const dictionary =\n dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;\n const U32 dictSize =\n dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;\n const U32 dictDelta =\n (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */\n\n int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);\n U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */\n const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;\n const BYTE* anchor = (const BYTE*) source;\n const BYTE* const iend = ip + inputSize;\n const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;\n const BYTE* const matchlimit = iend - LASTLITERALS;\n\n /* the dictCtx currentOffset is indexed on the start of the dictionary,\n * while a dictionary in the current context precedes the currentOffset */\n const BYTE* dictBase = (dictionary == NULL) ? NULL :\n (dictDirective == usingDictCtx) ?\n dictionary + dictSize - dictCtx->currentOffset :\n dictionary + dictSize - startIndex;\n\n BYTE* op = (BYTE*) dest;\n BYTE* const olimit = op + maxOutputSize;\n\n U32 offset = 0;\n U32 forwardH;\n\n DEBUGLOG(5, \"LZ4_compress_generic_validated: srcSize=%i, tableType=%u\", inputSize, tableType);\n assert(ip != NULL);\n if (tableType == byU16) assert(inputSize= 1);\n\n lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);\n\n /* Update context state */\n if (dictDirective == usingDictCtx) {\n /* Subsequent linked blocks can't use the dictionary. */\n /* Instead, they use the block we just compressed. */\n cctx->dictCtx = NULL;\n cctx->dictSize = (U32)inputSize;\n } else {\n cctx->dictSize += (U32)inputSize;\n }\n cctx->currentOffset += (U32)inputSize;\n cctx->tableType = (U32)tableType;\n\n if (inputSizehashTable, byPtr);\n } else {\n LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);\n } }\n ip++; forwardH = LZ4_hashPosition(ip, tableType);\n\n /* Main Loop */\n for ( ; ; ) {\n const BYTE* match;\n BYTE* token;\n const BYTE* filledIp;\n\n /* Find a match */\n if (tableType == byPtr) {\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);\n\n } while ( (match+LZ4_DISTANCE_MAX < ip)\n || (LZ4_read32(match) != LZ4_read32(ip)) );\n\n } else { /* byU32, byU16 */\n\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n U32 const current = (U32)(forwardIp - base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex <= current);\n assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n 
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n matchIndex += dictDelta; /* make dictCtx index comparable with current context */\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else if (dictDirective == usingExtDict) {\n if (matchIndex < startIndex) {\n DEBUGLOG(7, \"extDict candidate: matchIndex=%5u < startIndex=%5u\", matchIndex, startIndex);\n assert(startIndex - matchIndex >= MINMATCH);\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else { /* single continuous memory segment */\n match = base + matchIndex;\n }\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n\n DEBUGLOG(7, \"candidate at pos=%u (offset=%u \\n\", matchIndex, current - matchIndex);\n if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */\n assert(matchIndex < current);\n if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))\n && (matchIndex+LZ4_DISTANCE_MAX < current)) {\n continue;\n } /* too far */\n assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */\n\n if (LZ4_read32(match) == LZ4_read32(ip)) {\n if (maybe_extMem) offset = current - matchIndex;\n break; /* match found */\n }\n\n } while(1);\n }\n\n /* Catch up */\n filledIp = ip;\n assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */\n if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {\n do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));\n }\n\n /* Encode Literals */\n { unsigned const litLength = (unsigned)(ip - anchor);\n token = op++;\n if ((outputDirective == limitedOutput) && /* Check output buffer overflow */\n (unlikely(op + 
litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n if ((outputDirective == fillOutput) &&\n (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {\n op--;\n goto _last_literals;\n }\n if (litLength >= RUN_MASK) {\n unsigned len = litLength - RUN_MASK;\n *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255;\n *op++ = (BYTE)len;\n }\n else *token = (BYTE)(litLength< olimit)) {\n /* the match was too close to the end, rewind and go to last literals */\n op = token;\n goto _last_literals;\n }\n\n /* Encode Offset */\n if (maybe_extMem) { /* static test */\n DEBUGLOG(6, \" with offset=%u (ext if > %i)\", offset, (int)(ip - (const BYTE*)source));\n assert(offset <= LZ4_DISTANCE_MAX && offset > 0);\n LZ4_writeLE16(op, (U16)offset); op+=2;\n } else {\n DEBUGLOG(6, \" with offset=%u (same segment)\", (U32)(ip - match));\n assert(ip-match <= LZ4_DISTANCE_MAX);\n LZ4_writeLE16(op, (U16)(ip - match)); op+=2;\n }\n\n /* Encode MatchLength */\n { unsigned matchCode;\n\n if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)\n && (lowLimit==dictionary) /* match within extDict */ ) {\n const BYTE* limit = ip + (dictEnd-match);\n assert(dictEnd > match);\n if (limit > matchlimit) limit = matchlimit;\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);\n ip += (size_t)matchCode + MINMATCH;\n if (ip==limit) {\n unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);\n matchCode += more;\n ip += more;\n }\n DEBUGLOG(6, \" with matchLength=%u starting in extDict\", matchCode+MINMATCH);\n } else {\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);\n ip += (size_t)matchCode + MINMATCH;\n DEBUGLOG(6, \" with matchLength=%u\", matchCode+MINMATCH);\n }\n\n if 
((outputDirective) && /* Check output buffer overflow */\n (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {\n if (outputDirective == fillOutput) {\n /* Match description too long : reduce it */\n U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;\n ip -= matchCode - newMatchCode;\n assert(newMatchCode < matchCode);\n matchCode = newMatchCode;\n if (unlikely(ip <= filledIp)) {\n /* We have already filled up to filledIp so if ip ends up less than filledIp\n * we have positions in the hash table beyond the current position. This is\n * a problem if we reuse the hash table. So we have to remove these positions\n * from the hash table.\n */\n const BYTE* ptr;\n DEBUGLOG(5, \"Clearing %u positions\", (U32)(filledIp - ip));\n for (ptr = ip; ptr <= filledIp; ++ptr) {\n U32 const h = LZ4_hashPosition(ptr, tableType);\n LZ4_clearHash(h, cctx->hashTable, tableType);\n }\n }\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n }\n if (matchCode >= ML_MASK) {\n *token += ML_MASK;\n matchCode -= ML_MASK;\n LZ4_write32(op, 0xFFFFFFFF);\n while (matchCode >= 4*255) {\n op+=4;\n LZ4_write32(op, 0xFFFFFFFF);\n matchCode -= 4*255;\n }\n op += matchCode / 255;\n *op++ = (BYTE)(matchCode % 255);\n } else\n *token += (BYTE)(matchCode);\n }\n /* Ensure we have enough space for the last literals. 
*/\n assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));\n\n anchor = ip;\n\n /* Test end of chunk */\n if (ip >= mflimitPlusOne) break;\n\n /* Fill table */\n { U32 const h = LZ4_hashPosition(ip-2, tableType);\n if (tableType == byPtr) {\n LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);\n } else {\n U32 const idx = (U32)((ip-2) - base);\n LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);\n } }\n\n /* Test next position */\n if (tableType == byPtr) {\n\n match = LZ4_getPosition(ip, cctx->hashTable, tableType);\n LZ4_putPosition(ip, cctx->hashTable, tableType);\n if ( (match+LZ4_DISTANCE_MAX >= ip)\n && (LZ4_read32(match) == LZ4_read32(ip)) )\n { token=op++; *token=0; goto _next_match; }\n\n } else { /* byU32, byU16 */\n\n U32 const h = LZ4_hashPosition(ip, tableType);\n U32 const current = (U32)(ip-base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n matchIndex += dictDelta;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else if (dictDirective==usingExtDict) {\n if (matchIndex < startIndex) {\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else { /* single memory segment */\n match = base + matchIndex;\n }\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if ( ((dictIssue==dictSmall) ? 
(matchIndex >= prefixIdxLimit) : 1)\n && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))\n && (LZ4_read32(match) == LZ4_read32(ip)) ) {\n token=op++;\n *token=0;\n if (maybe_extMem) offset = current - matchIndex;\n DEBUGLOG(6, \"seq.start:%i, literals=%u, match.start:%i\",\n (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));\n goto _next_match;\n }\n }\n\n /* Prepare next loop */\n forwardH = LZ4_hashPosition(++ip, tableType);\n\n }\n\n_last_literals:\n /* Encode Last Literals */\n { size_t lastRun = (size_t)(iend - anchor);\n if ( (outputDirective) && /* Check output buffer overflow */\n (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {\n if (outputDirective == fillOutput) {\n /* adapt lastRun to fill 'dst' */\n assert(olimit >= op);\n lastRun = (size_t)(olimit-op) - 1/*token*/;\n lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */\n }\n }\n DEBUGLOG(6, \"Final literal run : %i literals\", (int)lastRun);\n if (lastRun >= RUN_MASK) {\n size_t accumulator = lastRun - RUN_MASK;\n *op++ = RUN_MASK << ML_BITS;\n for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;\n *op++ = (BYTE) accumulator;\n } else {\n *op++ = (BYTE)(lastRun< 0);\n DEBUGLOG(5, \"LZ4_compress_generic: compressed %i bytes into %i bytes\", inputSize, result);\n return result;\n}\n\n/** LZ4_compress_generic() :\n * inlined, to ensure branches are decided at compilation time;\n * takes care of src == (NULL, 0)\n * and forward the rest to LZ4_compress_generic_validated */\nLZ4_FORCE_INLINE int LZ4_compress_generic(\n LZ4_stream_t_internal* const cctx,\n const char* const src,\n char* const dst,\n const int srcSize,\n int *inputConsumed, /* only written when outputDirective == fillOutput */\n const int dstCapacity,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n DEBUGLOG(5, \"LZ4_compress_generic: srcSize=%i, dstCapacity=%i\",\n srcSize, dstCapacity);\n\n if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */\n if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */\n if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */\n DEBUGLOG(5, \"Generating an empty block\");\n assert(outputDirective == notLimited || dstCapacity >= 1);\n assert(dst != NULL);\n dst[0] = 0;\n if (outputDirective == fillOutput) {\n assert (inputConsumed != NULL);\n *inputConsumed = 0;\n }\n return 1;\n }\n assert(src != NULL);\n\n return LZ4_compress_generic_validated(cctx, src, dst, srcSize,\n inputConsumed, /* only written into if outputDirective == fillOutput */\n dstCapacity, outputDirective,\n tableType, dictDirective, dictIssue, 
                acceleration);
}


int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    /* Re-initialize the caller-provided state, then dispatch to the generic
     * compressor with the table type / output mode suited to the sizes. */
    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
    assert(ctx != NULL);
    /* acceleration < 1 selects the library default; values above the cap are clamped */
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        /* dst can hold the worst case : compression cannot fail (notLimited path) */
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            /* byPtr chosen on 32-bit platforms when the source address itself
             * exceeds LZ4_DISTANCE_MAX — otherwise index-based byU32 */
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        /* dst might be too small : bounded (limitedOutput) variants */
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}

/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step.
 * It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    /* acceleration < 1 selects the library default; values above the cap are clamped */
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    assert(ctx != NULL);

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        /* dst can hold the worst case : notLimited variants */
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            /* non-zero currentOffset means the context carries history:
             * dictSmall guards index comparisons in that case */
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        /* bounded output (limitedOutput) variants */
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ?
            byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}


int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    /* heap mode : allocate the working state instead of using the stack */
    LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL) return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t* const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}


int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
{
    /* default entry point : acceleration == 1 */
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}


/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
    void* const s = LZ4_initStream(state, sizeof (*state));
    assert(s != NULL); (void)s;

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
    } else {
        /* dst may be too small : use fillOutput mode, which also reports
         * back (via srcSizePtr) how much input was consumed */
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ?
            byPtr : byU32;
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
    }   }
}

int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
    int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
    /* clean the state on exit */
    LZ4_initStream(state, sizeof (LZ4_stream_t));
    return r;
}


int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
    /* heap mode : allocate the working state instead of using the stack */
    LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctx == NULL) return 0;
#else
    LZ4_stream_t ctxBody;
    LZ4_stream_t* const ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);

#if (LZ4_HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}



/*-******************************
*  Streaming functions
********************************/

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4_stream_t* LZ4_createStream(void)
{
    /* allocate and fully initialize a fresh stream state */
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_initStream(lz4s, sizeof(*lz4s));
    return lz4s;
}
#endif

static size_t LZ4_stream_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    /* measure the alignment requirement of LZ4_stream_t via a probe struct */
    typedef struct { char c; LZ4_stream_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_stream_t);
#else
    return 1;  /* effectively disabled */
#endif
}

LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
    /* validate the caller-provided buffer (non-NULL, large enough, aligned)
     * before zeroing the internal state */
    DEBUGLOG(5, "LZ4_initStream");
    if (buffer == NULL) { return NULL; }
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
    if (!LZ4_isAligned(buffer,
LZ4_stream_t_alignment())) return NULL;
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
    return (LZ4_stream_t*)buffer;
}

/* resetStream is now deprecated,
 * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}

void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    /* cheap reset : re-tags the table instead of zeroing the whole state */
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}
#endif


typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
                    const char* dictionary, int dictSize,
                    LoadDict_mode_e _ld)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    U32 idx32;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    /* It's necessary to reset the context,
     * and not just continue it with prepareTable()
     * to avoid any risk of generating overflowing matchIndex
     * when compressing using this dictionary */
    LZ4_resetStream(LZ4_dict);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k.
 */
    dict->currentOffset += 64 KB;

    /* dictionaries shorter than one hash word provide no usable positions */
    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    /* keep only the last 64 KB : that is the maximum match window */
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->tableType = (U32)tableType;
    idx32 = dict->currentOffset - dict->dictSize;

    while (p <= dictEnd-HASH_UNIT) {
        U32 const h = LZ4_hashPosition(p, tableType);
        /* Note: overwriting => favors positions end of dictionary */
        LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
        p+=3; idx32+=3;   /* stride of 3 : sparse first pass */
    }

    if (_ld == _ld_slow) {
        /* Fill hash table with additional references, to improve compression capability */
        p = dict->dictionary;
        idx32 = dict->currentOffset - dict->dictSize;
        while (p <= dictEnd-HASH_UNIT) {
            U32 const h = LZ4_hashPosition(p, tableType);
            U32 const limit = dict->currentOffset - 64 KB;
            if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
                /* Note: not overwriting => favors positions beginning of dictionary */
                LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
            }
            p++; idx32++;
        }
    }

    return (int)dict->dictSize;
}

int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
}

int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
}

void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
{
    const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
        &(dictionaryStream->internal_donotuse);

    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
             workingStream, dictionaryStream,
             dictCtx != NULL ? dictCtx->dictSize : 0);

    if (dictCtx != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicate a miss.
In that case, we need\n * to bump the offset to something non-zero.\n */\n if (workingStream->internal_donotuse.currentOffset == 0) {\n workingStream->internal_donotuse.currentOffset = 64 KB;\n }\n\n /* Don't actually attach an empty dictionary.\n */\n if (dictCtx->dictSize == 0) {\n dictCtx = NULL;\n }\n }\n workingStream->internal_donotuse.dictCtx = dictCtx;\n}\n\n\nstatic void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)\n{\n assert(nextSize >= 0);\n if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */\n /* rescale hash table */\n U32 const delta = LZ4_dict->currentOffset - 64 KB;\n const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;\n int i;\n DEBUGLOG(4, \"LZ4_renormDictT\");\n for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0;\n else LZ4_dict->hashTable[i] -= delta;\n }\n LZ4_dict->currentOffset = 64 KB;\n if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;\n LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;\n }\n}\n\n\nint LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,\n const char* source, char* dest,\n int inputSize, int maxOutputSize,\n int acceleration)\n{\n const tableType_t tableType = byU32;\n LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;\n const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;\n\n DEBUGLOG(5, \"LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)\", inputSize, streamPtr->dictSize);\n\n LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n\n /* invalidate tiny dictionaries */\n if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */\n && (dictEnd != source) /* prefix mode */\n && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */\n && (streamPtr->dictCtx == NULL) /* usingDictCtx */\n ) {\n DEBUGLOG(5, \"LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small\", streamPtr->dictSize, streamPtr->dictionary);\n /* remove dictionary existence from history, to employ faster prefix mode */\n streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)source;\n dictEnd = source;\n }\n\n /* Check overlapping input/dictionary space */\n { const char* const sourceEnd = source + inputSize;\n if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {\n streamPtr->dictSize = (U32)(dictEnd - sourceEnd);\n if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;\n if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;\n }\n }\n\n /* prefix mode : source data follows dictionary */\n if (dictEnd == source) {\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);\n else\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);\n }\n\n /* external dictionary mode */\n { int 
result;\n if (streamPtr->dictCtx) {\n /* We depend here on the fact that dictCtx'es (produced by\n * LZ4_loadDict) guarantee that their tables contain no references\n * to offsets between dictCtx->currentOffset - 64 KB and\n * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe\n * to use noDictIssue even when the dict isn't a full 64 KB.\n */\n if (inputSize > 4 KB) {\n /* For compressing large blobs, it is faster to pay the setup\n * cost to copy the dictionary's tables into the active context,\n * so that the compression loop is only looking into one table.\n */\n LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);\n }\n } else { /* small data <= 4 KB */\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n }\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)inputSize;\n return result;\n }\n}\n\n\n/* Hidden debug function, to force-test external dictionary mode */\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n\n LZ4_renormDictT(streamPtr, srcSize);\n\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, 
dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n\n return result;\n}\n\n\n/*! LZ4_saveDict() :\n * If previously compressed data block is not guaranteed to remain available at its memory location,\n * save it into a safer place (char* safeBuffer).\n * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,\n * one can therefore call LZ4_compress_fast_continue() right after.\n * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.\n */\nint LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n\n DEBUGLOG(5, \"LZ4_saveDict : dictSize=%i, safeBuffer=%p\", dictSize, safeBuffer);\n\n if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */\n if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }\n\n if (safeBuffer == NULL) assert(dictSize == 0);\n if (dictSize > 0) {\n const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;\n assert(dict->dictionary);\n LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);\n }\n\n dict->dictionary = (const BYTE*)safeBuffer;\n dict->dictSize = (U32)dictSize;\n\n return dictSize;\n}\n\n\n\n/*-*******************************\n * Decompression functions\n ********************************/\n\ntypedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;\n\n#undef MIN\n#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )\n\n\n/* variant for decompress_unsafe()\n * does not know end of input\n * presumes input is well formed\n * note : will consume at least one byte */\nstatic size_t read_long_length_no_check(const BYTE** pp)\n{\n size_t b, l = 0;\n do { b = **pp; (*pp)++; l += b; } while (b==255);\n DEBUGLOG(6, \"read_long_length_no_check: +length=%zu using %zu input bytes\", l, l/255 + 1)\n return l;\n}\n\n/* core decoder variant for LZ4_decompress_fast*()\n * for legacy support only : these entry points are deprecated.\n * - Presumes input is correctly formed (no defense vs malformed inputs)\n * - Does not know input size (presume input buffer is \"large enough\")\n * - Decompress a full block (only)\n * @return : nb of bytes read from input.\n * Note : this variant is not optimized for speed, just for maintenance.\n * the goal is to remove support of decompress_fast*() variants by v2.0\n**/\nLZ4_FORCE_INLINE int\nLZ4_decompress_unsafe_generic(\n const BYTE* const istart,\n BYTE* const ostart,\n int decompressedSize,\n\n size_t prefixSize,\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note: =0 if dictStart==NULL */\n )\n{\n const BYTE* ip = istart;\n BYTE* op = (BYTE*)ostart;\n BYTE* const oend = ostart + decompressedSize;\n const BYTE* const prefixStart = ostart - prefixSize;\n\n DEBUGLOG(5, \"LZ4_decompress_unsafe_generic\");\n if (dictStart == NULL) assert(dictSize == 0);\n\n while (1) {\n /* start new sequence */\n unsigned token = *ip++;\n\n /* literals */\n { size_t ll = token >> ML_BITS;\n if (ll==15) {\n /* long literal length */\n ll += read_long_length_no_check(&ip);\n }\n if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */\n LZ4_memmove(op, ip, ll); /* support in-place decompression */\n op += ll;\n ip += ll;\n if ((size_t)(oend-op) < MFLIMIT) {\n if (op==oend) break; /* end of block */\n DEBUGLOG(5, \"invalid: literals end at distance %zi from end of block\", oend-op);\n /* incorrect end of block 
:\n * last match must start at least MFLIMIT==12 bytes before end of output block */\n return -1;\n } }\n\n /* match */\n { size_t ml = token & 15;\n size_t const offset = LZ4_readLE16(ip);\n ip+=2;\n\n if (ml==15) {\n /* long literal length */\n ml += read_long_length_no_check(&ip);\n }\n ml += MINMATCH;\n\n if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */\n\n { const BYTE* match = op - offset;\n\n /* out of range */\n if (offset > (size_t)(op - prefixStart) + dictSize) {\n DEBUGLOG(6, \"offset out of range\");\n return -1;\n }\n\n /* check special case : extDict */\n if (offset > (size_t)(op - prefixStart)) {\n /* extDict scenario */\n const BYTE* const dictEnd = dictStart + dictSize;\n const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));\n size_t const extml = (size_t)(dictEnd - extMatch);\n if (extml > ml) {\n /* match entirely within extDict */\n LZ4_memmove(op, extMatch, ml);\n op += ml;\n ml = 0;\n } else {\n /* match split between extDict & prefix */\n LZ4_memmove(op, extMatch, extml);\n op += extml;\n ml -= extml;\n }\n match = prefixStart;\n }\n\n /* match copy - slow variant, supporting overlap copy */\n { size_t u;\n for (u=0; u= ipmax before start of loop. Returns initial_error if so.\n * @error (output) - error code. 
Must be set to 0 before call.\n**/\ntypedef size_t Rvl_t;\nstatic const Rvl_t rvl_error = (Rvl_t)(-1);\nLZ4_FORCE_INLINE Rvl_t\nread_variable_length(const BYTE** ip, const BYTE* ilimit,\n int initial_check)\n{\n Rvl_t s, length = 0;\n assert(ip != NULL);\n assert(*ip != NULL);\n assert(ilimit != NULL);\n if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */\n return rvl_error;\n }\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n if (likely(s != 255)) return length;\n do {\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n } while (s == 255);\n\n return length;\n}\n\n/*! 
LZ4_decompress_generic() :\n * This generic decompression function covers all use cases.\n * It shall be instantiated several times, using different sets of directives.\n * Note that it is important for performance that this function really get inlined,\n * in order to remove useless branches during compilation optimization.\n */\nLZ4_FORCE_INLINE int\nLZ4_decompress_generic(\n const char* const src,\n char* const dst,\n int srcSize,\n int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */\n\n earlyEnd_directive partialDecoding, /* full, partial */\n dict_directive dict, /* noDict, withPrefix64k, usingExtDict */\n const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note : = 0 if noDict */\n )\n{\n if ((src == NULL) || (outputSize < 0)) { return -1; }\n\n { const BYTE* ip = (const BYTE*) src;\n const BYTE* const iend = ip + srcSize;\n\n BYTE* op = (BYTE*) dst;\n BYTE* const oend = op + outputSize;\n BYTE* cpy;\n\n const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;\n\n const int checkOffset = (dictSize < (int)(64 KB));\n\n\n /* Set up the \"end\" pointers for the shortcut. */\n const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;\n const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;\n\n const BYTE* match;\n size_t offset;\n unsigned token;\n size_t length;\n\n\n DEBUGLOG(5, \"LZ4_decompress_generic (srcSize:%i, dstSize:%i)\", srcSize, outputSize);\n\n /* Special cases */\n assert(lowPrefix <= op);\n if (unlikely(outputSize==0)) {\n /* Empty output buffer */\n if (partialDecoding) return 0;\n return ((srcSize==1) && (*ip==0)) ? 
0 : -1;\n }\n if (unlikely(srcSize==0)) { return -1; }\n\n /* LZ4_FAST_DEC_LOOP:\n * designed for modern OoO performance cpus,\n * where copying reliably 32-bytes is preferable to an unpredictable branch.\n * note : fast loop may show a regression for some client arm chips. */\n#if LZ4_FAST_DEC_LOOP\n if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(6, \"move to safe decode loop\");\n goto safe_decode;\n }\n\n /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using fast decode loop\");\n while (1) {\n /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */\n assert(oend - op >= FASTLOOP_SAFE_DISTANCE);\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) {\n DEBUGLOG(6, \"error reading long literal length\");\n goto _output_error;\n }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n\n /* copy literals */\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }\n LZ4_wildCopy32(op, ip, op+length);\n ip += length; op += length;\n } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {\n /* We don't need to check oend, since we check it once for each loop below */\n DEBUGLOG(7, \"copy %u bytes in a 16-bytes stripe\", (unsigned)length);\n /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */\n LZ4_memcpy(op, ip, 16);\n ip += length; op += length;\n } else {\n goto safe_literal_copy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n DEBUGLOG(6, \"blockPos%6u: offset = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)offset);\n match = op - offset;\n assert(match <= op); /* overflow check */\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \" match length token = %u (len==%u)\", (unsigned)length, (unsigned)length+MINMATCH);\n\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) {\n DEBUGLOG(5, \"error reading long match length\");\n goto _output_error;\n }\n length += addl;\n length += MINMATCH;\n DEBUGLOG(7, \" long match length == %u\", (unsigned)length);\n if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n goto safe_match_copy;\n }\n } else {\n length += MINMATCH;\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(7, \"moving to safe_match_copy (ml==%u)\", (unsigned)length);\n goto safe_match_copy;\n }\n\n /* Fastpath check: skip LZ4_wildCopy32 when true */\n if ((dict == withPrefix64k) || (match >= lowPrefix)) {\n if (offset >= 8) {\n assert(match >= lowPrefix);\n assert(match <= op);\n assert(op + 18 <= oend);\n\n LZ4_memcpy(op, match, 8);\n LZ4_memcpy(op+8, match+8, 8);\n LZ4_memcpy(op+16, match+16, 2);\n op += length;\n continue;\n } } }\n\n if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {\n DEBUGLOG(5, \"Error : pos=%zi, offset=%zi => outside buffers\", op-lowPrefix, op-match);\n goto _output_error;\n }\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) {\n DEBUGLOG(7, \"partialDecoding: dictionary match, close to dstEnd\");\n length = MIN(length, (size_t)(oend-op));\n } else {\n DEBUGLOG(6, \"end-of-block condition violated\")\n goto _output_error;\n } }\n\n if (length <= 
(size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) { *op++ = *copyFrom++; }\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n\n /* copy match within block */\n cpy = op + length;\n\n assert((op <= oend) && (oend-op >= 32));\n if (unlikely(offset<16)) {\n LZ4_memcpy_using_offset(op, match, cpy, offset);\n } else {\n LZ4_wildCopy32(op, match, cpy);\n }\n\n op = cpy; /* wildcopy correction */\n }\n safe_decode:\n#endif\n\n /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using safe decode loop\");\n while (1) {\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* A two-stage shortcut for the most common case:\n * 1) If the literal length is 0..14, and there is enough space,\n * enter the shortcut and copy 16 bytes on behalf of the literals\n * (in the fast mode, only 8 bytes can be safely copied this way).\n * 2) Further if the match length is 4..18, copy 18 bytes in a similar\n * manner; but we ensure that there's enough space in the output for\n * those 18 bytes earlier, upon entering the shortcut (in other words,\n * there is a combined check for both stages).\n */\n if ( (length != RUN_MASK)\n /* strictly \"less than\" on input, to re-enter the loop with at least one byte */\n && likely((ip < shortiend) & (op <= 
shortoend)) ) {\n /* Copy the literals */\n LZ4_memcpy(op, ip, 16);\n op += length; ip += length;\n\n /* The second stage: prepare for match copying, decode full info.\n * If it doesn't work out, the info won't be wasted. */\n length = token & ML_MASK; /* match length */\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u (len=%u)\", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);\n offset = LZ4_readLE16(ip); ip += 2;\n match = op - offset;\n assert(match <= op); /* check overflow */\n\n /* Do not deal with overlapping matches. */\n if ( (length != ML_MASK)\n && (offset >= 8)\n && (dict==withPrefix64k || match >= lowPrefix) ) {\n /* Copy the match. */\n LZ4_memcpy(op + 0, match + 0, 8);\n LZ4_memcpy(op + 8, match + 8, 8);\n LZ4_memcpy(op +16, match +16, 2);\n op += length + MINMATCH;\n /* Both stages worked, load the next token. */\n continue;\n }\n\n /* The second stage didn't work out, but the info is ready.\n * Propel it right to the point of match copying. */\n goto _copy_match;\n }\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n }\n\n#if LZ4_FAST_DEC_LOOP\n safe_literal_copy:\n#endif\n /* copy literals */\n cpy = op+length;\n\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {\n /* We've either hit the input parsing restriction or the output parsing restriction.\n * In the normal scenario, decoding a full block, it must be the last sequence,\n * otherwise it's an error (invalid input or dimensions).\n * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.\n */\n if (partialDecoding) {\n /* Since we are 
partial decoding we may be in this block because of the output parsing\n * restriction, which is not valid since the output buffer is allowed to be undersized.\n */\n DEBUGLOG(7, \"partialDecoding: copying literals, close to input or output end\")\n DEBUGLOG(7, \"partialDecoding: literal length = %u\", (unsigned)length);\n DEBUGLOG(7, \"partialDecoding: remaining space in dstBuffer : %i\", (int)(oend - op));\n DEBUGLOG(7, \"partialDecoding: remaining space in srcBuffer : %i\", (int)(iend - ip));\n /* Finishing in the middle of a literals segment,\n * due to lack of input.\n */\n if (ip+length > iend) {\n length = (size_t)(iend-ip);\n cpy = op + length;\n }\n /* Finishing in the middle of a literals segment,\n * due to lack of output space.\n */\n if (cpy > oend) {\n cpy = oend;\n assert(op<=oend);\n length = (size_t)(oend-op);\n }\n } else {\n /* We must be on the last sequence (or invalid) because of the parsing limitations\n * so check that we exactly consume the input and don't overrun the output buffer.\n */\n if ((ip+length != iend) || (cpy > oend)) {\n DEBUGLOG(5, \"should have been last run of literals\")\n DEBUGLOG(5, \"ip(%p) + length(%i) = %p != iend (%p)\", ip, (int)length, ip+length, iend);\n DEBUGLOG(5, \"or cpy(%p) > (oend-MFLIMIT)(%p)\", cpy, oend-MFLIMIT);\n DEBUGLOG(5, \"after writing %u bytes / %i bytes available\", (unsigned)(op-(BYTE*)dst), outputSize);\n goto _output_error;\n }\n }\n LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */\n ip += length;\n op += length;\n /* Necessarily EOF when !partialDecoding.\n * When partialDecoding, it is EOF if we've either\n * filled the output buffer or\n * can't proceed with reading an offset for following match.\n */\n if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {\n break;\n }\n } else {\n LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */\n ip += length; op = cpy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n match = op - offset;\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n _copy_match:\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */\n }\n length += MINMATCH;\n\n#if LZ4_FAST_DEC_LOOP\n safe_match_copy:\n#endif\n if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) length = MIN(length, (size_t)(oend-op));\n else goto _output_error; /* doesn't respect parsing restriction */\n }\n\n if (length <= (size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) *op++ = *copyFrom++;\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n assert(match >= lowPrefix);\n\n /* copy match within block */\n cpy = op + length;\n\n /* partialDecoding : may end anywhere within the block */\n assert(op<=oend);\n if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n size_t const mlen = MIN(length, 
(size_t)(oend-op));\n const BYTE* const matchEnd = match + mlen;\n BYTE* const copyEnd = op + mlen;\n if (matchEnd > op) { /* overlap copy */\n while (op < copyEnd) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, mlen);\n }\n op = copyEnd;\n if (op == oend) { break; }\n continue;\n }\n\n if (unlikely(offset<8)) {\n LZ4_write32(op, 0); /* silence msan warning when offset==0 */\n op[0] = match[0];\n op[1] = match[1];\n op[2] = match[2];\n op[3] = match[3];\n match += inc32table[offset];\n LZ4_memcpy(op+4, match, 4);\n match -= dec64table[offset];\n } else {\n LZ4_memcpy(op, match, 8);\n match += 8;\n }\n op += 8;\n\n if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);\n if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */\n if (op < oCopyLimit) {\n LZ4_wildCopy8(op, match, oCopyLimit);\n match += oCopyLimit - op;\n op = oCopyLimit;\n }\n while (op < cpy) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, 8);\n if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }\n }\n op = cpy; /* wildcopy correction */\n }\n\n /* end of decoding */\n DEBUGLOG(5, \"decoded %i bytes\", (int) (((char*)op)-dst));\n return (int) (((char*)op)-dst); /* Nb of output bytes decoded */\n\n /* Overflow error detected */\n _output_error:\n return (int) (-(((const char*)ip)-src))-1;\n }\n}\n\n\n/*===== Instantiate the API decoding functions. 
=====*/\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,\n decode_full_block, noDict,\n (BYTE*)dest, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,\n partial_decode,\n noDict, (BYTE*)dst, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_fast(const char* source, char* dest, int originalSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_fast\");\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, NULL, 0);\n}\n\n/*===== Instantiate a few more decoding cases, used more than once. =====*/\n\nLZ4_FORCE_O2 /* Exported, an obsolete API function. */\nint LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\n/* Another obsolete API function, paired with the previous one. 
*/\nint LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,\n size_t prefixSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_safe_forceExtDict\");\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,\n const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, (const BYTE*)dictStart, dictSize);\n}\n\n/* The \"double 
dictionary\" mode, for use with e.g. ring buffers: the first part\n * of the dictionary is passed as prefix, and the second via dictStart + dictSize.\n * These routines are used only once, in LZ4_decompress_*_continue().\n */\nLZ4_FORCE_INLINE\nint LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize, const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);\n}\n\n/*===== streaming decompression functions =====*/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_streamDecode_t* LZ4_createStreamDecode(void)\n{\n LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));\n return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));\n}\n\nint LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)\n{\n if (LZ4_stream == NULL) { return 0; } /* support free on NULL */\n FREEMEM(LZ4_stream);\n return 0;\n}\n#endif\n\n/*! LZ4_setStreamDecode() :\n * Use this function to instruct where to find the dictionary.\n * This function is not necessary if previous data is still available where it was decoded.\n * Loading a size of 0 is allowed (same effect as no dictionary).\n * @return : 1 if OK, 0 if error\n */\nint LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n lz4sd->prefixSize = (size_t)dictSize;\n if (dictSize) {\n assert(dictionary != NULL);\n lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;\n } else {\n lz4sd->prefixEnd = (const BYTE*) dictionary;\n }\n lz4sd->externalDict = NULL;\n lz4sd->extDictSize = 0;\n return 1;\n}\n\n/*! 
LZ4_decoderRingBufferSize() :\n * when setting a ring buffer for streaming decompression (optional scenario),\n * provides the minimum size of this ring buffer\n * to be compatible with any source respecting maxBlockSize condition.\n * Note : in a ring buffer scenario,\n * blocks are presumed decompressed next to each other.\n * When not enough space remains for next block (remainingSize < maxBlockSize),\n * decoding resumes from beginning of ring buffer.\n * @return : minimum ring buffer size,\n * or 0 if there is an error (invalid maxBlockSize).\n */\nint LZ4_decoderRingBufferSize(int maxBlockSize)\n{\n if (maxBlockSize < 0) return 0;\n if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;\n if (maxBlockSize < 16) maxBlockSize = 16;\n return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);\n}\n\n/*\n*_continue() :\n These decoding functions allow decompression of multiple blocks in \"streaming\" mode.\n Previously decoded blocks must still be available at the memory position where they were decoded.\n If it's not possible, save the relevant part of decoded data into a safe buffer,\n and indicate where it stands using LZ4_setStreamDecode()\n*/\nLZ4_FORCE_O2\nint LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n int result;\n\n if (lz4sd->prefixSize == 0) {\n /* The first call, no dictionary yet. */\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n /* They're rolling the current segment. 
*/\n if (lz4sd->prefixSize >= 64 KB - 1)\n result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n else if (lz4sd->extDictSize == 0)\n result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize);\n else\n result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize += (size_t)result;\n lz4sd->prefixEnd += result;\n } else {\n /* The buffer wraps around, or they're switching to another buffer. */\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n }\n\n return result;\n}\n\nLZ4_FORCE_O2 int\nLZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,\n const char* source, char* dest, int originalSize)\n{\n LZ4_streamDecode_t_internal* const lz4sd =\n (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);\n int result;\n\n DEBUGLOG(5, \"LZ4_decompress_fast_continue (toDecodeSize=%i)\", originalSize);\n assert(originalSize >= 0);\n\n if (lz4sd->prefixSize == 0) {\n DEBUGLOG(5, \"first invocation : no prefix nor extDict\");\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_fast(source, dest, originalSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n DEBUGLOG(5, \"continue using existing prefix\");\n result = LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n lz4sd->prefixSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return 
result;\n lz4sd->prefixSize += (size_t)originalSize;\n lz4sd->prefixEnd += originalSize;\n } else {\n DEBUGLOG(5, \"prefix becomes extDict\");\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_fast_extDict(source, dest, originalSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n }\n\n return result;\n}\n\n\n/*\nAdvanced decoding functions :\n*_usingDict() :\n These decoding functions work the same as \"_continue\" ones,\n the dictionary must be explicitly provided within parameters\n*/\n\nint LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, 
dstCapacity, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0 || dictStart+dictSize == dest)\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n (size_t)dictSize, NULL, 0);\n assert(dictSize >= 0);\n return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);\n}\n\n\n/*=*************************************************\n* Obsolete Functions\n***************************************************/\n/* obsolete compression functions */\nint LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)\n{\n return LZ4_compress_default(source, dest, inputSize, maxOutputSize);\n}\nint LZ4_compress(const char* src, char* dest, int srcSize)\n{\n return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));\n}\nint LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);\n}\nint LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);\n}\nint LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);\n}\nint LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)\n{\n return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);\n}\n\n/*\nThese decompression functions are deprecated and should no longer be 
used.\nThey are only provided here for compatibility with older user programs.\n- LZ4_uncompress is totally equivalent to LZ4_decompress_fast\n- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe\n*/\nint LZ4_uncompress (const char* source, char* dest, int outputSize)\n{\n return LZ4_decompress_fast(source, dest, outputSize);\n}\nint LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)\n{\n return LZ4_decompress_safe(source, dest, isize, maxOutputSize);\n}\n\n/* Obsolete Streaming functions */\n\nint LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }\n\nint LZ4_resetStreamState(void* state, char* inputBuffer)\n{\n (void)inputBuffer;\n LZ4_resetStream((LZ4_stream_t*)state);\n return 0;\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nvoid* LZ4_create (char* inputBuffer)\n{\n (void)inputBuffer;\n return LZ4_createStream();\n}\n#endif\n\nchar* LZ4_slideInputBuffer (void* state)\n{\n /* avoid const char * -> char * conversion warning */\n return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;\n}\n\n#endif /* LZ4_COMMONDEFS_ONLY */\n"], ["/pogocache/src/net.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit net.c provides most network functionality, including listening on ports,\n// thread creation, event queue handling, and reading & writing sockets.\n#define _GNU_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifdef __linux__\n#include \n#include \n#include \n#include \n#else\n#include \n#endif\n\n#include \"uring.h\"\n#include \"stats.h\"\n#include \"net.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"xmalloc.h\"\n\n#define PACKETSIZE 16384\n#define MINURINGEVENTS 2 // there must be at least 2 events for uring use\n\nextern const int verb;\n\nstatic int setnonblock(int fd) {\n int flags = fcntl(fd, F_GETFL, 0);\n if (flags == -1) {\n return -1;\n }\n return fcntl(fd, F_SETFL, flags | O_NONBLOCK);\n}\n\nstatic int settcpnodelay(int fd, bool nodelay) {\n int val = nodelay;\n return setsockopt(fd, SOL_SOCKET, TCP_NODELAY, &val, sizeof(val)) == 0;\n}\n\nstatic int setquickack(int fd, bool quickack) {\n#if defined(__linux__)\n int val = quickack;\n return setsockopt(fd, SOL_SOCKET, TCP_QUICKACK, &val, sizeof(val)) == 0;\n#else\n (void)fd, (void)quickack;\n return 0;\n#endif\n}\n\nstatic int setkeepalive(int fd, bool keepalive) {\n int val = keepalive;\n if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val))) {\n return -1;\n }\n#if defined(__linux__)\n if (!keepalive) {\n return 0;\n }\n // tcp_keepalive_time\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &(int){300}, sizeof(int))) \n {\n return -1;\n }\n // tcp_keepalive_intvl\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&(int){30}, sizeof(int)))\n {\n return -1;\n }\n // tcp_keepalive_probes\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &(int){3}, sizeof(int))) {\n return -1;\n }\n#endif\n return 0;\n}\n\n#ifdef __linux__\ntypedef struct epoll_event event_t;\n#else\ntypedef struct kevent event_t;\n#endif\n\nstatic int event_fd(event_t *ev) {\n#ifdef __linux__\n return ev->data.fd;\n#else\n return ev->ident;\n#endif\n}\n\nstatic int getevents(int fd, event_t evs[], int nevs, bool wait_forever, \n int64_t timeout)\n{\n if (wait_forever) {\n#ifdef __linux__\n return epoll_wait(fd, evs, nevs, -1);\n#else\n return kevent(fd, NULL, 0, evs, nevs, 0);\n#endif\n } else {\n timeout = timeout < 0 ? 0 : \n timeout > 900000000 ? 900000000 : // 900ms\n timeout;\n#ifdef __linux__\n timeout = timeout / 1000000;\n return epoll_wait(fd, evs, nevs, timeout);\n#else\n struct timespec timespec = { .tv_nsec = timeout };\n return kevent(fd, NULL, 0, evs, nevs, ×pec);\n#endif\n }\n}\n\nstatic int addread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN | EPOLLEXCLUSIVE;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int addwrite(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delwrite(int qfd, int fd) {\n#ifdef __linux__\n struct 
epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int evqueue(void) {\n#ifdef __linux__\n return epoll_create1(0);\n#else\n return kqueue();\n#endif\n}\n\nstruct bgworkctx { \n void (*work)(void *udata);\n void (*done)(struct net_conn *conn, void *udata);\n struct net_conn *conn;\n void *udata;\n bool writer;\n};\n\n// static void bgdone(struct bgworkctx *bgctx);\n\nstruct net_conn {\n int fd;\n struct net_conn *next; // for hashmap bucket\n bool closed;\n struct tls *tls;\n void *udata;\n char *out;\n size_t outlen;\n size_t outcap;\n struct bgworkctx *bgctx;\n struct qthreadctx *ctx;\n unsigned stat_cmd_get;\n unsigned stat_cmd_set;\n unsigned stat_get_hits;\n unsigned stat_get_misses;\n};\n\nstatic struct net_conn *conn_new(int fd, struct qthreadctx *ctx) {\n struct net_conn *conn = xmalloc(sizeof(struct net_conn));\n memset(conn, 0, sizeof(struct net_conn));\n conn->fd = fd;\n conn->ctx = ctx;\n return conn;\n}\n\nstatic void conn_free(struct net_conn *conn) {\n if (conn) {\n if (conn->out) {\n xfree(conn->out);\n }\n xfree(conn);\n }\n}\n\nvoid net_conn_out_ensure(struct net_conn *conn, size_t amount) {\n if (conn->outcap-conn->outlen >= amount) {\n return;\n }\n size_t cap = conn->outcap == 0 ? 
16 : conn->outcap * 2;\n while (cap-conn->outlen < amount) {\n cap *= 2;\n }\n char *out = xmalloc(cap);\n memcpy(out, conn->out, conn->outlen);\n xfree(conn->out);\n conn->out = out;\n conn->outcap = cap;\n}\n\nvoid net_conn_out_write_byte_nocheck(struct net_conn *conn, char byte) {\n conn->out[conn->outlen++] = byte;\n}\n\nvoid net_conn_out_write_byte(struct net_conn *conn, char byte) {\n if (conn->outcap == conn->outlen) {\n net_conn_out_ensure(conn, 1);\n }\n net_conn_out_write_byte_nocheck(conn, byte);\n}\n\nvoid net_conn_out_write_nocheck(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n memcpy(conn->out+conn->outlen, data, nbytes);\n conn->outlen += nbytes;\n}\n\nvoid net_conn_out_write(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n if (conn->outcap-conn->outlen < nbytes) {\n net_conn_out_ensure(conn, nbytes);\n }\n net_conn_out_write_nocheck(conn, data, nbytes);\n}\n\nchar *net_conn_out(struct net_conn *conn) {\n return conn->out;\n}\n\nsize_t net_conn_out_len(struct net_conn *conn) {\n return conn->outlen;\n}\n\nsize_t net_conn_out_cap(struct net_conn *conn) {\n return conn->outcap;\n}\n\nvoid net_conn_out_setlen(struct net_conn *conn, size_t len) {\n assert(len < conn->outcap);\n conn->outlen = len;\n}\n\n\nbool net_conn_isclosed(struct net_conn *conn) {\n return conn->closed;\n}\n\nvoid net_conn_close(struct net_conn *conn) {\n conn->closed = true;\n}\n\nvoid net_conn_setudata(struct net_conn *conn, void *udata) {\n conn->udata = udata;\n}\n\nvoid *net_conn_udata(struct net_conn *conn) {\n return conn->udata;\n}\n\nstatic uint64_t hashfd(int fd) {\n return mix13((uint64_t)fd);\n}\n\n// map of connections\nstruct cmap {\n struct net_conn **buckets;\n size_t nbuckets;\n size_t len;\n};\n\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn);\n\nstatic void cmap_grow(struct cmap *cmap) {\n struct cmap cmap2 = { 0 };\n cmap2.nbuckets = cmap->nbuckets*2;\n size_t size = cmap2.nbuckets * sizeof(struct net_conn*);\n 
cmap2.buckets = xmalloc(size);\n memset(cmap2.buckets, 0, cmap2.nbuckets*sizeof(struct net_conn*));\n for (size_t i = 0; i < cmap->nbuckets; i++) {\n struct net_conn *conn = cmap->buckets[i];\n while (conn) {\n struct net_conn *next = conn->next;\n conn->next = 0;\n cmap_insert(&cmap2, conn);\n conn = next;\n }\n }\n xfree(cmap->buckets);\n memcpy(cmap, &cmap2, sizeof(struct cmap));\n}\n\n// Insert a connection into a map. \n// The connection MUST NOT exist in the map.\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n if (cmap->len >= cmap->nbuckets-(cmap->nbuckets>>2)) { // 75% load factor\n // if (cmap->len >= cmap->nbuckets) { // 100% load factor\n cmap_grow(cmap);\n }\n size_t i = hash % cmap->nbuckets;\n conn->next = cmap->buckets[i];\n cmap->buckets[i] = conn;\n cmap->len++;\n}\n\n// Return the connection or NULL if not exists.\nstatic struct net_conn *cmap_get(struct cmap *cmap, int fd) {\n uint32_t hash = hashfd(fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *conn = cmap->buckets[i];\n while (conn && conn->fd != fd) {\n conn = conn->next;\n }\n return conn;\n}\n\n// Delete connection from map. 
\n// The connection MUST exist in the map.\nstatic void cmap_delete(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *prev = 0;\n struct net_conn *iter = cmap->buckets[i];\n while (iter != conn) {\n prev = iter;\n iter = iter->next;\n }\n if (prev) {\n prev->next = iter->next;\n } else {\n cmap->buckets[i] = iter->next;\n }\n}\n\nstatic atomic_size_t nconns = 0;\nstatic atomic_size_t tconns = 0;\nstatic atomic_size_t rconns = 0;\n\nstatic pthread_mutex_t tls_ready_fds_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic int tls_ready_fds_cap = 0;\nstatic int tls_ready_fds_len = 0;\nstatic int *tls_ready_fds = 0;\n\nstatic void save_tls_fd(int fd) {\n pthread_mutex_lock(&tls_ready_fds_lock);\n if (tls_ready_fds_len == tls_ready_fds_cap) {\n tls_ready_fds_cap *= 2;\n if (tls_ready_fds_cap == 0) {\n tls_ready_fds_cap = 8;\n }\n tls_ready_fds = xrealloc(tls_ready_fds, tls_ready_fds_cap*sizeof(int));\n }\n tls_ready_fds[tls_ready_fds_len++] = fd;\n pthread_mutex_unlock(&tls_ready_fds_lock);\n}\n\nstatic bool del_tls_fd(int fd) {\n bool found = false;\n pthread_mutex_lock(&tls_ready_fds_lock);\n for (int i = 0; i < tls_ready_fds_len; i++) {\n if (tls_ready_fds[i] == fd) {\n tls_ready_fds[i] = tls_ready_fds[tls_ready_fds_len-1];\n tls_ready_fds_len--;\n found = true;\n break;\n }\n }\n pthread_mutex_unlock(&tls_ready_fds_lock);\n return found;\n}\n\nstruct qthreadctx {\n pthread_t th;\n int qfd;\n int index;\n int maxconns;\n int *sfd; // three entries\n bool tcpnodelay;\n bool keepalive;\n bool quickack;\n int queuesize;\n const char *unixsock;\n void *udata;\n bool uring;\n#ifndef NOURING\n struct io_uring ring;\n#endif\n void(*data)(struct net_conn*,const void*,size_t,void*);\n void(*opened)(struct net_conn*,void*);\n void(*closed)(struct net_conn*,void*);\n int nevents;\n event_t *events;\n atomic_int nconns;\n int ntlsconns;\n char *inpkts;\n struct net_conn **qreads;\n struct net_conn 
**qins;\n struct net_conn **qattachs;\n struct net_conn **qouts;\n struct net_conn **qcloses;\n char **qinpkts;\n int *qinpktlens; \n int nqreads;\n int nqins;\n int nqcloses;\n int nqattachs;\n int nqouts;\n int nthreads;\n \n uint64_t stat_cmd_get;\n uint64_t stat_cmd_set;\n uint64_t stat_get_hits;\n uint64_t stat_get_misses;\n\n struct qthreadctx *ctxs;\n struct cmap cmap;\n};\n\nstatic atomic_uint_fast64_t g_stat_cmd_get = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_set = 0;\nstatic atomic_uint_fast64_t g_stat_get_hits = 0;\nstatic atomic_uint_fast64_t g_stat_get_misses = 0;\n\ninline\nstatic void sumstats(struct net_conn *conn, struct qthreadctx *ctx) {\n ctx->stat_cmd_get += conn->stat_cmd_get;\n conn->stat_cmd_get = 0;\n ctx->stat_cmd_set += conn->stat_cmd_set;\n conn->stat_cmd_set = 0;\n ctx->stat_get_hits += conn->stat_get_hits;\n conn->stat_get_hits = 0;\n ctx->stat_get_misses += conn->stat_get_misses;\n conn->stat_get_misses = 0;\n}\n\ninline\nstatic void sumstats_global(struct qthreadctx *ctx) {\n atomic_fetch_add_explicit(&g_stat_cmd_get, ctx->stat_cmd_get, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_get = 0;\n atomic_fetch_add_explicit(&g_stat_cmd_set, ctx->stat_cmd_set, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_set = 0;\n atomic_fetch_add_explicit(&g_stat_get_hits, ctx->stat_get_hits, \n __ATOMIC_RELAXED);\n ctx->stat_get_hits = 0;\n atomic_fetch_add_explicit(&g_stat_get_misses, ctx->stat_get_misses, \n __ATOMIC_RELAXED);\n ctx->stat_get_misses = 0;\n}\n\nuint64_t stat_cmd_get(void) {\n uint64_t x = atomic_load_explicit(&g_stat_cmd_get, __ATOMIC_RELAXED);\n return x;\n}\n\nuint64_t stat_cmd_set(void) {\n return atomic_load_explicit(&g_stat_cmd_set, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_hits(void) {\n return atomic_load_explicit(&g_stat_get_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_misses(void) {\n return atomic_load_explicit(&g_stat_get_misses, __ATOMIC_RELAXED);\n}\n\ninline\nstatic void qreset(struct qthreadctx *ctx) {\n ctx->nqreads = 0;\n 
ctx->nqins = 0;\n ctx->nqcloses = 0;\n ctx->nqouts = 0;\n ctx->nqattachs = 0;\n}\n\ninline\nstatic void qaccept(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nevents; i++) {\n int fd = event_fd(&ctx->events[i]);\n struct net_conn *conn = cmap_get(&ctx->cmap, fd);\n if (!conn) {\n if ((fd == ctx->sfd[0] || fd == ctx->sfd[1] || fd == ctx->sfd[2])) {\n int sfd = fd;\n fd = accept(fd, 0, 0);\n if (fd == -1) {\n continue;\n }\n if (setnonblock(fd) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[0] || sfd == ctx->sfd[2]) {\n if (setkeepalive(fd, ctx->keepalive) == -1) {\n close(fd);\n continue;\n }\n if (settcpnodelay(fd, ctx->tcpnodelay) == -1) {\n close(fd);\n continue;\n }\n if (setquickack(fd, ctx->quickack) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[2]) {\n save_tls_fd(fd);\n }\n }\n static atomic_uint_fast64_t next_ctx_index = 0;\n int idx = atomic_fetch_add(&next_ctx_index, 1) % ctx->nthreads;\n if (addread(ctx->ctxs[idx].qfd, fd) == -1) {\n if (sfd == ctx->sfd[2]) {\n del_tls_fd(fd);\n }\n close(fd);\n continue;\n }\n continue;\n }\n size_t xnconns = atomic_fetch_add(&nconns, 1);\n if (xnconns >= (size_t)ctx->maxconns) {\n // rejected\n atomic_fetch_add(&rconns, 1);\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n continue;\n }\n bool istls = del_tls_fd(fd);\n conn = conn_new(fd, ctx);\n if (istls) {\n if (!tls_accept(conn->fd, &conn->tls)) {\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n conn_free(conn);\n continue;\n }\n ctx->ntlsconns++;\n }\n atomic_fetch_add_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_add_explicit(&tconns, 1, __ATOMIC_RELEASE);\n cmap_insert(&ctx->cmap, conn);\n ctx->opened(conn, ctx->udata);\n }\n if (conn->bgctx) {\n // BGWORK(2)\n // The connection has been added back to the event loop, but it\n // needs to be attached and restated.\n ctx->qattachs[ctx->nqattachs++] = conn;\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n 
ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void handle_read(ssize_t n, char *pkt, struct net_conn *conn,\n struct qthreadctx *ctx)\n{\n assert(conn->outlen == 0);\n assert(conn->bgctx == 0);\n if (n <= 0) {\n if (n == 0 || errno != EAGAIN) {\n // read failed, close connection\n ctx->qcloses[ctx->nqcloses++] = conn;\n return;\n }\n assert(n == -1 && errno == EAGAIN);\n // even though there's an EAGAIN, still call the user data event\n // handler with an empty packet \n n = 0;\n }\n pkt[n] = '\\0';\n ctx->qins[ctx->nqins] = conn;\n ctx->qinpkts[ctx->nqins] = pkt;\n ctx->qinpktlens[ctx->nqins] = n;\n ctx->nqins++;\n}\n\ninline \nstatic void flush_conn(struct net_conn *conn, size_t written) {\n while (written < conn->outlen) {\n ssize_t n;\n if (conn->tls) {\n n = tls_write(conn->tls, conn->fd, conn->out+written, \n conn->outlen-written);\n } else {\n n = write(conn->fd, conn->out+written, conn->outlen-written);\n }\n if (n == -1) {\n if (errno == EAGAIN) {\n continue;\n }\n conn->closed = true;\n break;\n }\n written += n;\n }\n // either everything was written or the socket is closed\n conn->outlen = 0;\n}\n\ninline\nstatic void qattach(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nqattachs; i++) {\n // BGWORK(3)\n // A bgworker has finished, make sure it's added back into the \n // event loop in the correct state.\n struct net_conn *conn = ctx->qattachs[i];\n struct bgworkctx *bgctx = conn->bgctx;\n bgctx->done(conn, bgctx->udata);\n conn->bgctx = 0;\n assert(bgctx);\n xfree(bgctx);\n int ret = delwrite(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n ret = addread(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void qread(struct qthreadctx *ctx) {\n // Read incoming socket data\n#ifndef NOURING\n if 
(ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // read incoming using uring\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_read(sqe, conn->fd, pkt, PACKETSIZE-1, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n assert(ret == ctx->nqreads);\n for (int i = 0; i < ctx->nqreads; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n = cqe->res;\n if (n < 0) {\n errno = -n;\n n = -1;\n }\n handle_read(n, pkt, conn, ctx);\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // read incoming data using standard syscalls.\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n;\n if (conn->tls) {\n n = tls_read(conn->tls, conn->fd, pkt, PACKETSIZE-1);\n } else {\n n = read(conn->fd, pkt, PACKETSIZE-1);\n }\n handle_read(n, pkt, conn, ctx);\n }\n#ifndef NOURING\n }\n#endif\n}\n\n\ninline\nstatic void qprocess(struct qthreadctx *ctx) {\n // process all new incoming data\n for (int i = 0; i < ctx->nqins; i++) {\n struct net_conn *conn = ctx->qins[i];\n char *p = ctx->qinpkts[i];\n int n = ctx->qinpktlens[i];\n ctx->data(conn, p, n, ctx->udata);\n sumstats(conn, ctx);\n if (conn->bgctx) {\n // BGWORK(1)\n // Connection entered background mode.\n // This means the connection is no longer in the event queue but\n // is still owned by this qthread. 
Once the bgwork is done the \n // connection will be added back to the queue with addwrite.\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n}\n\ninline\nstatic void qprewrite(struct qthreadctx *ctx) {\n (void)ctx;\n // TODO: perform any prewrite operations\n}\n\ninline\nstatic void qwrite(struct qthreadctx *ctx) {\n // Flush all outgoing socket data.\n#ifndef NOURING\n if (ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // write outgoing using uring\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_write(sqe, conn->fd, conn->out, conn->outlen, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n for (int i = 0; i < ctx->nqouts; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qouts[i];\n ssize_t n = cqe->res;\n if (n == -EAGAIN) {\n n = 0;\n }\n if (n < 0) {\n conn->closed = true;\n } else {\n // Any extra data must be flushed using syscall write.\n flush_conn(conn, n);\n }\n // Either everything was written or the socket is closed\n conn->outlen = 0;\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // Write data using write syscall\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n#ifndef NOURING\n }\n#endif\n}\n\ninline\nstatic void qclose(struct qthreadctx *ctx) {\n // Close all sockets that need to be closed\n for (int i = 0; i < ctx->nqcloses; i++) {\n struct net_conn *conn = ctx->qcloses[i];\n ctx->closed(conn, ctx->udata);\n if 
(conn->tls) {\n tls_close(conn->tls, conn->fd);\n ctx->ntlsconns--;\n } else {\n close(conn->fd);\n }\n cmap_delete(&ctx->cmap, conn);\n atomic_fetch_sub_explicit(&nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_sub_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n conn_free(conn);\n }\n}\n\nstatic void *qthread(void *arg) {\n struct qthreadctx *ctx = arg;\n#ifndef NOURING\n if (ctx->uring) {\n if (io_uring_queue_init(ctx->queuesize, &ctx->ring, 0) < 0) {\n perror(\"# io_uring_queue_init\");\n abort();\n }\n }\n#endif\n // connection map\n memset(&ctx->cmap, 0, sizeof(struct cmap));\n ctx->cmap.nbuckets = 64;\n size_t size = ctx->cmap.nbuckets*sizeof(struct net_conn*);\n ctx->cmap.buckets = xmalloc(size);\n memset(ctx->cmap.buckets, 0, ctx->cmap.nbuckets*sizeof(struct net_conn*));\n\n ctx->events = xmalloc(sizeof(event_t)*ctx->queuesize);\n ctx->qreads = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->inpkts = xmalloc(PACKETSIZE*ctx->queuesize);\n ctx->qins = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qinpkts = xmalloc(sizeof(char*)*ctx->queuesize);\n ctx->qinpktlens = xmalloc(sizeof(int)*ctx->queuesize);\n ctx->qcloses = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qouts = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qattachs = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n\n while (1) {\n sumstats_global(ctx);\n ctx->nevents = getevents(ctx->qfd, ctx->events, ctx->queuesize, 1, 0);\n if (ctx->nevents <= 0) {\n if (ctx->nevents == -1 && errno != EINTR) {\n perror(\"# getevents\");\n abort();\n }\n continue;\n }\n // reset, accept, attach, read, process, prewrite, write, close\n qreset(ctx); // reset the step queues\n qaccept(ctx); // accept incoming connections\n qattach(ctx); // attach bg workers. 
uncommon\n qread(ctx); // read from sockets\n qprocess(ctx); // process new socket data\n qprewrite(ctx); // perform any prewrite operations, such as fsync\n qwrite(ctx); // write to sockets\n qclose(ctx); // close any sockets that need closing\n }\n return 0;\n}\n\nstatic int listen_tcp(const char *host, const char *port, bool reuseport, \n int backlog)\n{\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return 0;\n }\n int ret;\n host = host ? host : \"127.0.0.1\";\n port = port ? port : \"0\";\n struct addrinfo hints = { 0 }, *addrs;\n hints.ai_family = AF_UNSPEC; \n hints.ai_socktype = SOCK_STREAM;\n hints.ai_protocol = IPPROTO_TCP;\n ret = getaddrinfo(host, port, &hints, &addrs);\n if (ret != 0) {\n fprintf(stderr, \"# getaddrinfo: %s: %s:%s\", gai_strerror(ret), host,\n port);\n abort();\n }\n struct addrinfo *ainfo = addrs;\n while (ainfo->ai_family != PF_INET) {\n ainfo = ainfo->ai_next;\n }\n assert(ainfo);\n int fd = socket(ainfo->ai_family, ainfo->ai_socktype, ainfo->ai_protocol);\n if (fd == -1) {\n perror(\"# socket(tcp)\");\n abort();\n }\n if (reuseport) {\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, \n sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseport)\");\n abort();\n }\n }\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &(int){1},sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseaddr)\");\n abort();\n }\n ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n ret = bind(fd, ainfo->ai_addr, ainfo->ai_addrlen);\n if (ret == -1) {\n fprintf(stderr, \"# bind(tcp): %s:%s\", host, port);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(tcp): %s:%s\", host, port);\n abort();\n }\n freeaddrinfo(addrs);\n return fd;\n}\n\nstatic int listen_unixsock(const char *unixsock, int backlog) {\n if (!unixsock || !*unixsock) {\n return 0;\n }\n struct sockaddr_un unaddr;\n int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n if (fd == -1) {\n 
perror(\"# socket(unix)\");\n abort();\n }\n memset(&unaddr, 0, sizeof(struct sockaddr_un));\n unaddr.sun_family = AF_UNIX;\n strncpy(unaddr.sun_path, unixsock, sizeof(unaddr.sun_path) - 1);\n int ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n unlink(unixsock);\n ret = bind(fd, (struct sockaddr *)&unaddr, sizeof(struct sockaddr_un));\n if (ret == -1) {\n fprintf(stderr, \"# bind(unix): %s\", unixsock);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(unix): %s\", unixsock);\n abort();\n }\n return fd;\n}\n\nstatic atomic_uintptr_t all_ctxs = 0;\n\n// current connections\nsize_t net_nconns(void) {\n return atomic_load_explicit(&nconns, __ATOMIC_ACQUIRE);\n}\n\n// total connections ever\nsize_t net_tconns(void) {\n return atomic_load_explicit(&tconns, __ATOMIC_ACQUIRE);\n}\n\n// total rejected connections ever\nsize_t net_rconns(void) {\n return atomic_load_explicit(&rconns, __ATOMIC_ACQUIRE);\n}\n\nstatic void warmupunix(const char *unixsock, int nsocks) {\n if (!unixsock || !*unixsock) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n socks[i] = socket(AF_UNIX, SOCK_STREAM, 0);\n if (socks[i] == -1) {\n socks[i] = 0;\n continue;\n }\n struct sockaddr_un addr;\n memset(&addr, 0, sizeof(struct sockaddr_un));\n addr.sun_family = AF_UNIX;\n strncpy(addr.sun_path, unixsock, sizeof(addr.sun_path) - 1);\n if (connect(socks[i], (struct sockaddr *)&addr, \n sizeof(struct sockaddr_un)) == -1)\n {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup unix socket (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\n\nstatic void warmuptcp(const char *host, const char *port, int nsocks) {\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n struct addrinfo hints, *res;\n memset(&hints, 0, sizeof(hints));\n hints.ai_family = AF_INET;\n hints.ai_socktype = SOCK_STREAM;\n int err = getaddrinfo(host, port, &hints, &res);\n if (err != 0) {\n continue;\n }\n socks[i] = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n if (socks[i] == -1) {\n freeaddrinfo(res);\n continue;\n }\n int ret = connect(socks[i], res->ai_addr, res->ai_addrlen);\n freeaddrinfo(res);\n if (ret == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup tcp (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\nstatic void *thwarmup(void *arg) {\n // Perform a warmup of the epoll queues and listeners by making a quick\n // connection to each.\n struct net_opts *opts = arg;\n warmupunix(opts->unixsock, opts->nthreads*2);\n warmuptcp(opts->host, opts->port, opts->nthreads*2);\n return 0;\n}\n\nvoid net_main(struct net_opts *opts) {\n (void)delread;\n int sfd[3] = {\n listen_tcp(opts->host, opts->port, opts->reuseport, opts->backlog),\n listen_unixsock(opts->unixsock, opts->backlog),\n listen_tcp(opts->host, opts->tlsport, opts->reuseport, opts->backlog),\n };\n if (!sfd[0] && !sfd[1] && !sfd[2]) {\n printf(\"# No listeners provided\\n\");\n abort();\n }\n opts->listening(opts->udata);\n struct qthreadctx *ctxs = xmalloc(sizeof(struct qthreadctx)*opts->nthreads);\n memset(ctxs, 0, sizeof(struct qthreadctx)*opts->nthreads);\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n ctx->nthreads = opts->nthreads;\n ctx->tcpnodelay = opts->tcpnodelay;\n ctx->keepalive = opts->keepalive;\n ctx->quickack = opts->quickack;\n ctx->uring = !opts->nouring;\n ctx->ctxs = ctxs;\n ctx->index = i;\n ctx->maxconns = opts->maxconns;\n ctx->sfd = sfd;\n ctx->data = opts->data;\n ctx->udata = opts->udata;\n ctx->opened = opts->opened;\n ctx->closed = opts->closed;\n ctx->qfd = evqueue();\n if (ctx->qfd == -1) {\n perror(\"# evqueue\");\n abort();\n }\n atomic_init(&ctx->nconns, 0);\n for (int j = 0; j < 3; j++) {\n if (sfd[j]) {\n int ret = addread(ctx->qfd, sfd[j]);\n if (ret == -1) {\n perror(\"# addread\");\n abort();\n }\n }\n }\n ctx->unixsock = opts->unixsock;\n ctx->queuesize = opts->queuesize;\n }\n atomic_store(&all_ctxs, (uintptr_t)(void*)ctxs);\n opts->ready(opts->udata);\n if (!opts->nowarmup) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thwarmup, opts);\n if (ret != -1) {\n pthread_detach(th);\n }\n }\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n 
if (i == opts->nthreads-1) {\n qthread(ctx);\n } else {\n int ret = pthread_create(&ctx->th, 0, qthread, ctx);\n if (ret == -1) {\n perror(\"# pthread_create\");\n abort();\n }\n }\n }\n}\n\nstatic void *bgwork(void *arg) {\n struct bgworkctx *bgctx = arg;\n bgctx->work(bgctx->udata);\n // We are not in the same thread context as the event loop that owns this\n // connection. Adding the writer to the queue will allow for the loop\n // thread to gracefully continue the operation and then call the 'done'\n // callback.\n int ret = addwrite(bgctx->conn->ctx->qfd, bgctx->conn->fd);\n assert(ret == 0); (void)ret;\n return 0;\n}\n\n// net_conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool net_conn_bgwork(struct net_conn *conn, void (*work)(void *udata), \n void (*done)(struct net_conn *conn, void *udata), void *udata)\n{\n if (conn->bgctx || conn->closed) {\n return false;\n }\n struct qthreadctx *ctx = conn->ctx;\n int ret = delread(ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n conn->bgctx = xmalloc(sizeof(struct bgworkctx));\n memset(conn->bgctx, 0, sizeof(struct bgworkctx));\n conn->bgctx->conn = conn;\n conn->bgctx->done = done;\n conn->bgctx->work = work;\n conn->bgctx->udata = udata;\n pthread_t th;\n if (pthread_create(&th, 0, bgwork, conn->bgctx) == -1) {\n // Failed to create thread. 
Revert and return false.\n ret = addread(ctx->qfd, conn->fd);\n assert(ret == 0);\n xfree(conn->bgctx);\n conn->bgctx = 0;\n return false;\n } else {\n pthread_detach(th);\n }\n return true;\n}\n\nbool net_conn_bgworking(struct net_conn *conn) {\n return conn->bgctx != 0;\n}\n\nvoid net_stat_cmd_get_incr(struct net_conn *conn) {\n conn->stat_cmd_get++;\n}\n\nvoid net_stat_cmd_set_incr(struct net_conn *conn) {\n conn->stat_cmd_set++;\n}\n\nvoid net_stat_get_hits_incr(struct net_conn *conn) {\n conn->stat_get_hits++;\n}\n\nvoid net_stat_get_misses_incr(struct net_conn *conn) {\n conn->stat_get_misses++;\n}\n\nbool net_conn_istls(struct net_conn *conn) {\n return conn->tls != 0;\n}\n"], ["/pogocache/src/cmds.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit cmd.c handles all incoming client commands.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n#include \"pogocache.h\"\n#include \"stats.h\"\n\n// from main.c\nextern const uint64_t seed;\nextern const char *path;\nextern const int verb;\nextern const char *auth;\nextern const bool useauth;\nextern const char *persist;\nextern const int nthreads;\nextern const char *version;\nextern const char *githash;\nextern atomic_int_fast64_t flush_delay;\nextern atomic_bool sweep;\nextern atomic_bool lowmem;\nextern const int nshards;\nextern const int narenas;\nextern const int64_t procstart;\nextern const int maxconns;\n\nextern struct pogocache 
*cache;\n\nstruct set_entry_context {\n bool written;\n struct conn *conn;\n const char *cmdname;\n};\n\nstatic bool set_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)val, (void)vallen,\n (void)expires, (void)flags, (void)cas;\n struct set_entry_context *ctx = udata;\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n pg_write_row_desc(ctx->conn, (const char*[]){ \"value\" }, 1);\n pg_write_row_data(ctx->conn, (const char*[]){ val }, \n (size_t[]){ vallen }, 1);\n pg_write_completef(ctx->conn, \"%s 1\", ctx->cmdname);\n pg_write_ready(ctx->conn, 'I');\n } else {\n conn_write_bulk(ctx->conn, val, vallen);\n }\n ctx->written = true;\n return true;\n}\n\nstatic void execSET(struct conn *conn, const char *cmdname, \n int64_t now, const char *key,\n size_t keylen, const char *val, size_t vallen, int64_t expires, bool nx,\n bool xx, bool get, bool keepttl, uint32_t flags, uint64_t cas, bool withcas)\n{\n stat_cmd_set_incr(conn);\n struct set_entry_context ctx = { .conn = conn, .cmdname = cmdname };\n struct pogocache_store_opts opts = {\n .time = now,\n .expires = expires,\n .cas = cas,\n .flags = flags,\n .keepttl = keepttl,\n .casop = withcas,\n .nx = nx,\n .xx = xx,\n .lowmem = atomic_load_explicit(&lowmem, __ATOMIC_ACQUIRE),\n .entry = get?set_entry:0,\n .udata = get?&ctx:0,\n };\n int status = pogocache_store(cache, key, keylen, val, vallen, &opts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n return;\n }\n if (get) {\n if (!ctx.written) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n pg_write_completef(conn, \"%s 0\", cmdname);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_null(conn);\n }\n }\n return;\n }\n bool stored = status == POGOCACHE_INSERTED || 
status == POGOCACHE_REPLACED;\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (!stored) {\n if (status == POGOCACHE_FOUND) {\n conn_write_raw(conn, \"EXISTS\\r\\n\", 8);\n } else {\n conn_write_raw(conn, \"NOT_FOUND\\r\\n\", 12);\n }\n } else {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n }\n break;\n case PROTO_HTTP:\n if (!stored) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Stored\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"%s %d\", cmdname, stored?1:0);\n pg_write_ready(conn, 'I');\n break;\n default:\n if (!stored) {\n conn_write_null(conn);\n } else {\n conn_write_string(conn, \"OK\");\n }\n break;\n }\n}\n\nstatic int64_t expiry_seconds_time(struct conn *conn, int64_t now, \n int64_t expiry)\n{\n if (conn_proto(conn) == PROTO_MEMCACHE && expiry > HOUR*24*30) {\n // Consider Unix time value rather than an offset from current time.\n int64_t unix_ = sys_unixnow();\n if (expiry > unix_) {\n expiry = expiry-sys_unixnow();\n } else {\n expiry = 0;\n }\n }\n return int64_add_clamp(now, expiry);\n}\n\n// SET key value [NX | XX] [GET] [EX seconds | PX milliseconds |\n// EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL] \n// [FLAGS flags] [CAS cas] \nstatic void cmdSET(struct conn *conn, struct args *args) {\n#ifdef CMDSETOK\n // For testing the theoretical top speed of a single SET command.\n // No data is stored.\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\n#endif\n // RESP command\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n const char *val = args->bufs[2].data;\n size_t vallen = args->bufs[2].len;\n int64_t expires = 0;\n int exkind = 0;\n bool nx = false;\n bool xx = false;\n bool 
get = false;\n bool keepttl = false;\n bool hasex = false;\n uint32_t flags = 0;\n uint64_t cas = 0;\n bool withcas = false;\n for (size_t i = 3; i < args->len; i++) {\n if (argeq(args, i, \"ex\")) {\n exkind = 1;\n goto parse_ex;\n } else if (argeq(args, i, \"px\")) {\n exkind = 2;\n goto parse_ex;\n } else if (argeq(args, i, \"exat\")) {\n exkind = 3;\n goto parse_ex;\n } else if (argeq(args, i, \"pxat\")) {\n exkind = 4;\n parse_ex:\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, \n &expires);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n if (expires <= 0) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n // memcache allows for negative expiration\n expires = expiry_seconds_time(conn, now, 0);\n goto skip_exkind;\n } else {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n }\n switch (exkind) {\n case 1:\n expires = int64_mul_clamp(expires, SECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 2:\n expires = int64_mul_clamp(expires, MILLISECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 3:\n expires = int64_mul_clamp(expires, SECOND);\n break;\n case 4:\n expires = int64_mul_clamp(expires, MILLISECOND);\n break;\n }\n skip_exkind:\n hasex = true;\n } else if (argeq(args, i, \"nx\")) {\n nx = true;\n } else if (argeq(args, i, \"xx\")) {\n xx = true;\n } else if (argeq(args, i, \"get\")) {\n get = true;\n } else if (argeq(args, i, \"keepttl\")) {\n keepttl = true;\n } else if (argeq(args, i, \"flags\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n uint64_t x;\n if (!argu64(args, i, &x)) {\n goto err_syntax;\n }\n flags = x&UINT32_MAX;\n } else if (argeq(args, i, \"cas\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n if (!argu64(args, i, &cas)) {\n goto err_syntax;\n }\n withcas = true;\n } else {\n goto err_syntax;\n }\n }\n assert(expires >= 0);\n if (keepttl && 
hasex > 0){\n goto err_syntax;\n }\n if (xx && nx > 0){\n goto err_syntax;\n }\n execSET(conn, \"SET\", now, key, keylen, val, vallen, expires, nx, xx, get,\n keepttl, flags, cas, withcas);\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n}\n\nstatic void cmdSETEX(struct conn *conn, struct args *args) {\n if (args->len != 4) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t ex = 0;\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool ok = parse_i64(args->bufs[2].data, args->bufs[2].len, &ex);\n if (!ok || ex <= 0) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n ex = int64_mul_clamp(ex, SECOND);\n ex = int64_add_clamp(sys_now(), ex);\n const char *val = args->bufs[3].data;\n size_t vallen = args->bufs[3].len;\n execSET(conn, \"SETEX\", now, key, keylen, val, vallen, ex, 0, 0, 0, 0, 0, 0,\n 0);\n}\n\nstruct get_entry_context {\n struct conn *conn;\n bool cas;\n bool mget;\n};\n\nstatic void get_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)key, (void)keylen, (void)cas;\n (void)shard, (void)time, (void)expires, (void)flags, (void)update;\n struct get_entry_context *ctx = udata;\n int x;\n uint8_t buf[24];\n size_t n;\n switch (conn_proto(ctx->conn)) {\n case PROTO_POSTGRES:;\n char casbuf[24];\n if (ctx->cas) {\n x = 1;\n n = snprintf(casbuf, sizeof(casbuf), \"%\" PRIu64, cas);\n } else {\n x = 0;\n casbuf[0] = '\\0';\n n = 0;\n }\n if (ctx->mget) {\n pg_write_row_data(ctx->conn, (const char*[]){ key, val, casbuf }, \n (size_t[]){ keylen, vallen, n }, 2+x);\n } else {\n pg_write_row_data(ctx->conn, (const char*[]){ val, casbuf }, \n (size_t[]){ vallen, n }, 1+x);\n }\n break;\n case PROTO_MEMCACHE:\n conn_write_raw(ctx->conn, \"VALUE \", 6);\n conn_write_raw(ctx->conn, key, keylen);\n n = 
u64toa(flags, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n n = u64toa(vallen, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n if (ctx->cas) {\n n = u64toa(cas, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n }\n conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n conn_write_raw(ctx->conn, val, vallen);\n conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n break;\n case PROTO_HTTP:\n conn_write_http(ctx->conn, 200, \"OK\", val, vallen);\n break;\n default:\n if (ctx->cas) {\n conn_write_array(ctx->conn, 2);\n conn_write_uint(ctx->conn, cas);\n }\n conn_write_bulk(ctx->conn, val, vallen);\n }\n}\n\n// GET key\nstatic void cmdGET(struct conn *conn, struct args *args) {\n stat_cmd_get_incr(conn);\n#ifdef CMDGETNIL\n conn_write_null(conn);\n return;\n#endif\n#ifdef CMDSETOK\n conn_write_string(conn, \"$1\\r\\nx\\r\\n\");\n return;\n#endif\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n struct get_entry_context ctx = { \n .conn = conn\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_HTTP) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\" , -1);\n } else if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 0\");\n } else {\n conn_write_null(conn);\n }\n } else {\n stat_get_hits_incr(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 1\");\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_ready(conn, 'I');\n }\n}\n\n// MGET key [key...]\nstatic void cmdMGET(struct conn 
*conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct get_entry_context ctx = { \n .conn = conn,\n .mget = true,\n .cas = argeq(args, 0, \"mgets\"),\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int count = 0;\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\", \"value\", \"cas\" }, \n 2+(ctx.cas?1:0));\n } else if (proto == PROTO_RESP) {\n conn_write_array(conn, args->len-1);\n }\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_get_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_null(conn);\n }\n } else {\n count++;\n stat_get_hits_incr(conn);\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"MGET %d\", count);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n }\n}\n\nstruct keys_ctx {\n int64_t now;\n struct buf buf;\n size_t count;\n char *pattern;\n size_t plen;\n};\n\nstatic void keys_ctx_free(struct keys_ctx *ctx) {\n xfree(ctx->pattern);\n buf_clear(&ctx->buf);\n xfree(ctx);\n}\n\n// pattern matcher\n// see https://github.com/tidwall/match.c\nstatic bool match(const char *pat, size_t plen, const char *str, size_t slen,\n int depth)\n{\n if (depth == 128) {\n return false;\n }\n while (plen > 0) {\n if (pat[0] == '\\\\') {\n if (plen == 1) return false;\n pat++; plen--; \n } else if (pat[0] == '*') {\n if (plen == 1) return true;\n if (pat[1] == '*') {\n pat++; plen--;\n continue;\n }\n if (match(pat+1, plen-1, str, slen, depth+1)) return true;\n if (slen == 0) return false;\n str++; slen--;\n continue;\n }\n if (slen == 0) return false;\n if (pat[0] != 
'?' && str[0] != pat[0]) return false;\n pat++; plen--;\n str++; slen--;\n }\n return slen == 0 && plen == 0;\n}\n\nstatic int keys_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)value, (void)valuelen, (void)expires, \n (void)flags, (void)cas;\n struct keys_ctx *ctx = udata;\n if ((ctx->plen == 1 && *ctx->pattern == '*') || \n match(ctx->pattern, ctx->plen, key, keylen, 0))\n {\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n ctx->count++;\n }\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void bgkeys_work(void *udata) {\n struct keys_ctx *ctx = udata;\n struct pogocache_iter_opts opts = {\n .time = ctx->now,\n .entry = keys_entry,\n .udata = ctx,\n };\n pogocache_iter(cache, &opts);\n}\n\nstatic void bgkeys_done(struct conn *conn, void *udata) {\n struct keys_ctx *ctx = udata;\n int proto = conn_proto(conn);\n const char *p = ctx->buf.data;\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\" }, 1);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n pg_write_row_data(conn, (const char*[]){ key }, \n (size_t[]){ keylen }, 1);\n }\n pg_write_completef(conn, \"KEYS %zu\", ctx->count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_array(conn, ctx->count);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n conn_write_bulk(conn, key, keylen);\n }\n }\n keys_ctx_free(ctx);\n}\n\nstatic void cmdKEYS(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *pattern = args->bufs[1].data;\n size_t plen = args->bufs[1].len;\n struct keys_ctx *ctx = xmalloc(sizeof(struct 
keys_ctx));\n memset(ctx, 0, sizeof(struct keys_ctx));\n ctx->pattern = xmalloc(plen+1);\n memcpy(ctx->pattern, pattern, plen);\n ctx->pattern[plen] = '\\0';\n ctx->plen = plen;\n ctx->now = now;\n if (!conn_bgwork(conn, bgkeys_work, bgkeys_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n keys_ctx_free(ctx);\n }\n}\n\nstatic void cmdDEL(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct pogocache_delete_opts opts = {\n .time = now,\n };\n int64_t deleted = 0;\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_delete(cache, key, keylen, &opts);\n if (status == POGOCACHE_DELETED) {\n stat_delete_hits_incr(conn);\n deleted++;\n } else {\n stat_delete_misses_incr(conn);\n }\n }\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (deleted == 0) {\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n } else {\n conn_write_raw_cstr(conn, \"DELETED\\r\\n\");\n }\n break;\n case PROTO_HTTP:\n if (deleted == 0) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Deleted\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"DEL %\" PRIi64, deleted);\n pg_write_ready(conn, 'I');\n break;\n default:\n conn_write_int(conn, deleted);\n }\n}\n\nstatic void cmdDBSIZE(struct conn *conn, struct args *args) {\n if (args->len != 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n struct pogocache_count_opts opts = { .time = sys_now() };\n size_t count = pogocache_count(cache, &opts);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"count\", count, \"DBSIZE\");\n } else {\n conn_write_int(conn, (int64_t)count);\n }\n}\n\nstruct flushctx { \n pthread_t th;\n int64_t time;\n int start;\n int count;\n};\n\nstatic void 
*thflush(void *arg) {\n struct flushctx *ctx = arg;\n struct pogocache_clear_opts opts = { .time = sys_now(), .oneshard = true };\n for (int i = 0; i < ctx->count; i++) {\n opts.oneshardidx = i+ctx->start;\n pogocache_clear(cache, &opts);\n }\n return 0;\n}\n\nstatic void bgflushwork(void *udata) {\n (void)udata;\n atomic_store(&flush_delay, 0);\n int64_t now = sys_now();\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n struct flushctx *ctxs = xmalloc(nprocs*sizeof(struct flushctx));\n memset(ctxs, 0, nprocs*sizeof(struct flushctx));\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->time = now;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (pthread_create(&ctx->th, 0, thflush, ctx) == -1) {\n ctx->th = 0;\n }\n start += ctx->count;\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thflush(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n}\n\nstatic void bgflushdone(struct conn *conn, void *udata) {\n const char *cmdname = udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s SYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\n// FLUSHALL [SYNC|ASYNC] [DELAY ]\nstatic void cmdFLUSHALL(struct conn *conn, struct args *args) {\n const char *cmdname = \n args_eq(args, 0, \"flush\") ? \"FLUSH\" :\n args_eq(args, 0, \"flushdb\") ? 
\"FLUSHDB\" :\n \"FLUSHALL\";\n stat_cmd_flush_incr(conn);\n bool async = false;\n int64_t delay = 0;\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"async\")) {\n async = true;\n } else if (argeq(args, i, \"sync\")) {\n async = false;\n } else if (argeq(args, i, \"delay\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, &delay);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid exptime argument\");\n return;\n }\n if (delay > 0) {\n async = true;\n }\n } else {\n goto err_syntax;\n }\n }\n if (async) {\n if (delay < 0) {\n delay = 0;\n }\n delay = int64_mul_clamp(delay, SECOND);\n delay = int64_add_clamp(delay, sys_now());\n atomic_store(&flush_delay, delay);\n // ticker will check the delay and perform the flush\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s ASYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n // Flush database is slow. cmdname is static and thread safe\n conn_bgwork(conn, bgflushwork, bgflushdone, (void*)cmdname);\n return;\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct bgsaveloadctx {\n bool ok; // true = success, false = out of disk space\n bool fast; // use all the proccesing power, otherwise one thread.\n char *path; // path to file\n bool load; // otherwise save\n};\n\nstatic void bgsaveloadwork(void *udata) {\n struct bgsaveloadctx *ctx = udata;\n int64_t start = sys_now();\n int status;\n if (ctx->load) {\n status = load(ctx->path, ctx->fast, 0);\n } else {\n status = save(ctx->path, ctx->fast);\n }\n printf(\". 
%s finished %.3f secs\\n\", ctx->load?\"load\":\"save\", \n (sys_now()-start)/1e9);\n ctx->ok = status == 0;\n}\n\nstatic void bgsaveloaddone(struct conn *conn, void *udata) {\n struct bgsaveloadctx *ctx = udata;\n if (ctx->ok) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s OK\", ctx->load?\"LOAD\":\"SAVE\");\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (ctx->load) {\n conn_write_error(conn, \"load failed\");\n } else {\n conn_write_error(conn, \"save failed\");\n }\n }\n xfree(ctx->path);\n xfree(ctx);\n}\n\n// SAVE [TO ] [FAST]\n// LOAD [FROM ] [FAST]\nstatic void cmdSAVELOAD(struct conn *conn, struct args *args) {\n bool load = argeq(args, 0, \"load\");\n bool fast = false;\n const char *path = persist;\n size_t plen = strlen(persist);\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"fast\")) {\n fast = true;\n } else if ((load && argeq(args, i, \"from\")) || \n (!load && argeq(args, i, \"to\")))\n {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n path = args->bufs[i].data;\n plen = args->bufs[i].len;\n } else {\n goto err_syntax;\n }\n }\n if (plen == 0) {\n conn_write_error(conn, \"ERR path not provided\");\n return;\n }\n struct bgsaveloadctx *ctx = xmalloc(sizeof(struct bgsaveloadctx));\n memset(ctx, 0, sizeof(struct bgsaveloadctx));\n ctx->fast = fast;\n ctx->path = xmalloc(plen+1);\n ctx->load = load;\n memcpy(ctx->path, path, plen);\n ctx->path[plen] = '\\0';\n if (!conn_bgwork(conn, bgsaveloadwork, bgsaveloaddone, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx->path);\n xfree(ctx);\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct ttlctx {\n struct conn *conn;\n bool pttl;\n};\n\nstatic void ttl_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t 
vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)key, (void)keylen, (void)val, (void)vallen, (void)flags,\n (void)cas, (void)update;\n struct ttlctx *ctx = udata;\n int64_t ttl;\n if (expires > 0) {\n ttl = expires-time;\n if (ctx->pttl) {\n ttl /= MILLISECOND;\n } else {\n ttl /= SECOND;\n }\n } else {\n ttl = -1;\n }\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n char ttlstr[24];\n size_t n = i64toa(ttl, (uint8_t*)ttlstr);\n pg_write_row_data(ctx->conn, (const char*[]){ ttlstr }, \n (size_t[]){ n }, 1);\n } else {\n conn_write_int(ctx->conn, ttl);\n }\n}\n\nstatic void cmdTTL(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool pttl = argeq(args, 0, \"pttl\");\n struct ttlctx ctx = { .conn = conn, .pttl = pttl };\n struct pogocache_load_opts opts = {\n .time = sys_now(),\n .entry = ttl_entry,\n .notouch = true,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ pttl?\"pttl\":\"ttl\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_int(conn, -2);\n }\n } else {\n stat_get_hits_incr(conn);\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %d\", pttl?\"PTTL\":\"TTL\",\n status!=POGOCACHE_NOTFOUND);\n pg_write_ready(conn, 'I');\n }\n}\n\nstatic void expire_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)expires, (void)cas;\n struct pogocache_update *ctx = udata;\n ctx->flags = flags;\n ctx->value = 
value;\n ctx->valuelen = valuelen;\n *update = ctx;\n}\n\n// EXPIRE key seconds\n// returns 1 if success or 0 on failure. \nstatic void cmdEXPIRE(struct conn *conn, struct args *args) {\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n int64_t expires;\n if (!argi64(args, 2, &expires)) {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n return;\n }\n expires = int64_mul_clamp(expires, POGOCACHE_SECOND);\n expires = int64_add_clamp(now, expires);\n struct pogocache_update ctx = { .expires = expires };\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = expire_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(cache, key, keylen, &lopts);\n int ret = status == POGOCACHE_FOUND;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"EXPIRE %d\", ret);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, ret);\n }\n}\n\n// EXISTS key [key...]\n// Checks if one or more keys exist in the cache.\n// Return the number of keys that exist\nstatic void cmdEXISTS(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t count = 0;\n struct pogocache_load_opts opts = {\n .time = now,\n .notouch = true,\n };\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n count++;\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"exists\", count, \"EXISTS\");\n } else {\n conn_write_int(conn, count);\n }\n}\n\nstatic void sweep_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n size_t swept;\n size_t kept;\n struct pogocache_sweep_opts opts = {\n .time = start,\n };\n printf(\". 
sweep started\\n\");\n pogocache_sweep(cache, &swept, &kept, &opts);\n double elapsed = (sys_now()-start)/1e9;\n printf(\". sweep finished in %.2fs, (swept=%zu, kept=%zu) \\n\", elapsed, \n swept, kept);\n}\n\nstatic void sweep_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thsweep(void *arg) {\n (void)arg;\n sweep_work(0);\n return 0;\n}\n\n// SWEEP [ASYNC]\nstatic void cmdSWEEP(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thsweep, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, sweep_work, sweep_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstatic void purge_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n printf(\". purge started\\n\");\n xpurge();\n double elapsed = (sys_now()-start)/1e9;\n printf(\". 
purge finished in %.2fs\\n\", elapsed);\n}\n\nstatic void purge_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thpurge(void *arg) {\n (void)arg;\n purge_work(0);\n return 0;\n}\n\n// PURGE [ASYNC]\nstatic void cmdPURGE(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thpurge, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, purge_work, purge_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstruct populate_ctx {\n pthread_t th;\n size_t start;\n size_t count;\n char *prefix;\n size_t prefixlen;\n char *val;\n size_t vallen;\n bool randex;\n int randmin;\n int randmax;\n};\n\nstatic void *populate_entry(void *arg) {\n int64_t now = sys_now();\n struct populate_ctx *ctx = arg;\n char *key = xmalloc(ctx->prefixlen+32);\n memcpy(key, ctx->prefix, ctx->prefixlen);\n key[ctx->prefixlen++] = ':';\n for (size_t i = ctx->start; i < ctx->start+ctx->count; i++) {\n size_t n = i64toa(i, (uint8_t*)(key+ctx->prefixlen));\n size_t keylen = ctx->prefixlen+n;\n struct pogocache_store_opts opts = { \n .time = now,\n };\n if (ctx->randex) {\n int ex = (rand()%(ctx->randmax-ctx->randmin))+ctx->randmin;\n opts.ttl = ex*POGOCACHE_SECOND;\n }\n pogocache_store(cache, key, keylen, ctx->val, ctx->vallen, 
&opts);\n }\n xfree(key);\n return 0;\n}\n\n// DEBUG POPULATE [rand-ex-range]\n// DEBUG POPULATE \n// DEBUG POPULATE 1000000 test 16\n// DEBUG POPULATE 1000000 test 16 5-10\nstatic void cmdDEBUG_populate(struct conn *conn, struct args *args) {\n if (args->len != 4 && args->len != 5) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t count;\n if (!argi64(args, 1, &count) || count < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n size_t prefixlen = args->bufs[2].len;\n char *prefix = args->bufs[2].data;\n int64_t vallen;\n if (!argi64(args, 3, &vallen) || vallen < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n bool randex = false;\n int randmin = 0;\n int randmax = 0;\n if (args->len == 5) {\n size_t exlen = args->bufs[4].len;\n char *aex = args->bufs[4].data;\n char *ex = xmalloc(exlen+1);\n memcpy(ex, aex, exlen);\n ex[exlen] = '\\0';\n if (strchr(ex, '-')) {\n randmin = atoi(ex);\n randmax = atoi(strchr(ex, '-')+1);\n randex = true;\n }\n xfree(ex);\n }\n\n char *val = xmalloc(vallen);\n memset(val, 0, vallen);\n int nprocs = sys_nprocs();\n if (nprocs < 0) {\n nprocs = 1;\n }\n struct populate_ctx *ctxs = xmalloc(nprocs*sizeof(struct populate_ctx));\n memset(ctxs, 0, nprocs*sizeof(struct populate_ctx));\n size_t group = count/nprocs;\n size_t start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n ctx->start = start;\n if (i == nprocs-1) {\n ctx->count = count-start;\n } else {\n ctx->count = group;\n }\n ctx->prefix = prefix;\n ctx->prefixlen = prefixlen;\n ctx->val = val;\n ctx->vallen = vallen;\n ctx->randex = randex;\n ctx->randmin = randmin;\n ctx->randmax = randmax;\n if (pthread_create(&ctx->th, 0, populate_entry, ctx) == -1) {\n ctx->th = 0;\n }\n start += group;\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n populate_entry(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = 
&ctxs[i];\n if (ctx->th) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n xfree(val);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"DEBUG POPULATE %\" PRIi64, count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstruct dbg_detach_ctx {\n int64_t now;\n int64_t then;\n};\n\nstatic void detach_work(void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n ctx->then = sys_now();\n // printf(\". ----- DELAY START\\n\");\n // sleep(1);\n // printf(\". ----- DELAY END\\n\");\n}\n\nstatic void detach_done(struct conn *conn, void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n char buf[128];\n snprintf(buf, sizeof(buf), \"%\" PRId64 \":%\" PRId64, ctx->now, ctx->then);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_str_ready(conn, \"detach\", buf, \"DEBUG DETACH\");\n } else {\n conn_write_bulk_cstr(conn, buf);\n }\n xfree(ctx);\n}\n\n// DEBUG detach\nstatic void cmdDEBUG_detach(struct conn *conn, struct args *args) {\n (void)args;\n struct dbg_detach_ctx *ctx = xmalloc(sizeof(struct dbg_detach_ctx));\n memset(ctx, 0,sizeof(struct dbg_detach_ctx));\n ctx->now = sys_now();\n if (!conn_bgwork(conn, detach_work, detach_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx);\n }\n}\n\n// DEBUG subcommand (args...)\nstatic void cmdDEBUG(struct conn *conn, struct args *args) {\n if (args->len <= 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n // args = args[1:]\n args = &(struct args){ .bufs = args->bufs+1, .len = args->len-1 };\n if (argeq(args, 0, \"populate\")) {\n cmdDEBUG_populate(conn, args);\n } else if (argeq(args, 0, \"detach\")) {\n cmdDEBUG_detach(conn, args);\n } else {\n conn_write_error(conn, \"ERR unknown subcommand\");\n }\n}\n\nstatic void cmdECHO(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n 
pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"ECHO\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n}\n\nstatic void cmdPING(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n if (args->len == 1) {\n pg_write_simple_row_str_ready(conn, \"message\", \"PONG\", \"PING\"); \n } else {\n pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"PING\");\n }\n } else {\n if (args->len == 1) {\n conn_write_string(conn, \"PONG\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n }\n}\n\nstatic void cmdQUIT(struct conn *conn, struct args *args) {\n (void)args;\n if (conn_proto(conn) == PROTO_RESP) {\n conn_write_string(conn, \"OK\");\n }\n conn_close(conn);\n}\n\n// TOUCH key [key...]\nstatic void cmdTOUCH(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t touched = 0;\n struct pogocache_load_opts opts = { \n .time = now,\n };\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_touch_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n stat_touch_hits_incr(conn);\n touched++;\n } else {\n stat_touch_misses_incr(conn);\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"TOUCH %\" PRIi64, touched);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, touched);\n }\n}\n\nstruct get64ctx {\n bool ok;\n bool isunsigned;\n union {\n int64_t ival;\n uint64_t uval;\n };\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n};\n\nunion delta { \n uint64_t u;\n int64_t i;\n};\n\nstatic void get64(int shard, int64_t time, const void *key,\n 
size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update;\n struct get64ctx *ctx = udata;\n ctx->flags = flags;\n ctx->expires = expires;\n ctx->cas = cas;\n if (ctx->isunsigned) {\n ctx->ok = parse_u64(val, vallen, &ctx->uval);\n } else {\n ctx->ok = parse_i64(val, vallen, &ctx->ival);\n }\n}\n\nstatic void execINCRDECR(struct conn *conn, const char *key, size_t keylen, \n union delta delta, bool decr, bool isunsigned, const char *cmdname)\n{\n bool hit = false;\n bool miss = false;\n int64_t now = sys_now();\n struct get64ctx ctx = { .isunsigned = isunsigned };\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts gopts = {\n .time = now,\n .entry = get64,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &gopts);\n bool found = status == POGOCACHE_FOUND;\n if (found && !ctx.ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR cannot increment or \"\n \"decrement non-numeric value\\r\\n\");\n goto done;\n }\n goto fail_value_non_numeric;\n } else if (!found && conn_proto(conn) == PROTO_MEMCACHE) {\n miss = true;\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n goto done;\n }\n // add or subtract\n bool overflow;\n if (isunsigned) {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.uval, delta.u, &ctx.uval);\n } else {\n overflow = __builtin_add_overflow(ctx.uval, delta.u, &ctx.uval);\n }\n } else {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.ival, delta.i, &ctx.ival);\n } else {\n overflow = __builtin_add_overflow(ctx.ival, delta.i, &ctx.ival);\n }\n }\n if (overflow && conn_proto(conn) != PROTO_MEMCACHE) {\n goto fail_overflow;\n }\n // re-set the value\n char val[24];\n size_t vallen;\n if (isunsigned) {\n vallen = u64toa(ctx.uval, (uint8_t*)val);\n } else {\n vallen = i64toa(ctx.ival, (uint8_t*)val);\n 
}\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires, \n .flags = ctx.flags, \n .cas = ctx.cas,\n .udata = &ctx,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n char val[24];\n if (isunsigned) {\n snprintf(val, sizeof(val), \"%\" PRIu64, ctx.uval);\n } else {\n snprintf(val, sizeof(val), \"%\" PRIi64, ctx.ival);\n }\n pg_write_simple_row_str_readyf(conn, \"value\", val, \"%s\", cmdname);\n } else {\n if (isunsigned) {\n conn_write_uint(conn, ctx.uval);\n } else {\n conn_write_int(conn, ctx.ival);\n }\n }\n hit = true;\n goto done;\nfail_value_non_numeric:\n conn_write_error(conn, ERR_INVALID_INTEGER);\n goto done;\nfail_overflow:\n conn_write_error(conn, \"ERR increment or decrement would overflow\");\n goto done;\ndone:\n if (hit) {\n if (decr) {\n stat_decr_hits_incr(conn);\n } else {\n stat_incr_hits_incr(conn);\n }\n } else if (miss) {\n if (decr) {\n stat_decr_misses_incr(conn);\n } else {\n stat_incr_misses_incr(conn);\n }\n }\n pogocache_end(batch);\n}\n\nstatic void cmdINCRDECRBY(struct conn *conn, struct args *args, \n bool decr, const char *cmdname)\n{\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta;\n bool ok;\n if (isunsigned) {\n ok = argu64(args, 2, &delta.u);\n } else {\n ok = argi64(args, 2, &delta.i);\n }\n if (!ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR invalid numeric delta \"\n \"argument\\r\\n\");\n } else {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n }\n return;\n }\n execINCRDECR(conn, key, keylen, delta, decr, 
isunsigned, cmdname);\n}\n\n// DECRBY key num\nstatic void cmdDECRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, true, \"DECRBY\");\n}\n\n// INCRBY key num\nstatic void cmdINCRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, false, \"INCRBY\");\n}\n\n// DECR key\nstatic void cmdDECR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, true, isunsigned, \"DECR\");\n}\n\n// INCR key\nstatic void cmdINCR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, false, isunsigned, \"INCR\");\n}\n\nstruct appendctx {\n bool prepend;\n uint32_t flags;\n int64_t expires;\n const char *val;\n size_t vallen;\n char *outval;\n size_t outvallen;\n};\n\nstatic void append_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires, \n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update, (void)cas;\n struct appendctx *ctx = udata;\n ctx->expires = expires;\n ctx->flags = flags;\n ctx->outvallen = vallen+ctx->vallen;\n ctx->outval = xmalloc(ctx->outvallen);\n if (ctx->prepend) {\n memcpy(ctx->outval, ctx->val, ctx->vallen);\n memcpy(ctx->outval+ctx->vallen, val, vallen);\n } else {\n memcpy(ctx->outval, val, vallen);\n memcpy(ctx->outval+vallen, ctx->val, ctx->vallen);\n }\n}\n\n// APPEND \nstatic void cmdAPPEND(struct conn *conn, struct args *args) {\n int64_t now = 
sys_now();\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int proto = conn_proto(conn);\n bool prepend = argeq(args, 0, \"prepend\");\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n size_t vallen;\n const char *val = args_at(args, 2, &vallen);\n struct appendctx ctx = { \n .prepend = prepend,\n .val = val,\n .vallen = vallen,\n };\n size_t len;\n // Use a batch transaction for key isolation.\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = append_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &lopts);\n if (status == POGOCACHE_NOTFOUND) {\n if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"NOT_STORED\\r\\n\");\n goto done;\n }\n len = vallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n } else {\n if (ctx.outvallen > MAXARGSZ) {\n // do not let values become larger than 500MB\n xfree(ctx.outval);\n conn_write_error(conn, \"ERR value too large\");\n goto done;\n }\n len = ctx.outvallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires,\n .flags = ctx.flags,\n };\n status = pogocache_store(batch, key, keylen, ctx.outval, ctx.outvallen, \n &sopts);\n xfree(ctx.outval);\n }\n if (status == POGOCACHE_NOMEM) {\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %zu\", prepend?\"PREPEND\":\"APPEND\", len);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"STORED\\r\\n\");\n } else {\n conn_write_int(conn, len);\n }\ndone:\n pogocache_end(batch);\n}\n\nstatic void cmdPREPEND(struct conn *conn, struct args *args) {\n cmdAPPEND(conn, args);\n}\n\nstatic void cmdAUTH(struct conn *conn, struct args 
*args) {\n stat_auth_cmds_incr(0);\n if (!argeq(args, 0, \"auth\")) {\n stat_auth_errors_incr(0);\n goto noauth;\n }\n if (args->len == 3) {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n if (args->len > 3) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n if (args->len == 1) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (args->bufs[1].len != strlen(auth) || \n memcmp(auth, args->bufs[1].data, args->bufs[1].len) != 0)\n {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n conn_setauth(conn, true);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_complete(conn, \"AUTH OK\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\nnoauth:\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \n \"CLIENT_ERROR Authentication required\\r\\n\");\n } else {\n conn_write_error(conn, \"NOAUTH Authentication required.\");\n }\n return;\nwrongpass:\n conn_write_error(conn, \n \"WRONGPASS invalid username-password pair or user is disabled.\");\n}\n\nstruct stats {\n // use the args type as a list.\n struct args args;\n};\n\nstatic void stats_begin(struct stats *stats) {\n memset(stats, 0, sizeof(struct stats));\n}\n\nstatic void stats_end(struct stats *stats, struct conn *conn) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"stat\", \"value\" }, 2);\n for (size_t i = 0; i < stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n pg_write_row_data(conn, (const char*[]){ stat, val }, \n (size_t[]){ strlen(stat), strlen(val) }, 2);\n }\n pg_write_completef(conn, \"STATS %zu\", stats->args.len);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n char line[512];\n for (size_t i = 0; i < 
stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n size_t n = snprintf(line, sizeof(line), \"STAT %s\\r\\n\", stat);\n conn_write_raw(conn, line, n);\n }\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n } else {\n conn_write_array(conn, stats->args.len);\n for (size_t i = 0; i < stats->args.len; i++) {\n conn_write_array(conn, 2);\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n conn_write_bulk_cstr(conn, key);\n conn_write_bulk_cstr(conn, val);\n }\n }\n args_free(&stats->args);\n}\n\nstatic void stats_printf(struct stats *stats, const char *format, ...) {\n // initializing list pointer\n char line[512];\n va_list ap;\n va_start(ap, format);\n size_t len = vsnprintf(line, sizeof(line)-1, format, ap);\n va_end(ap);\n args_append(&stats->args, line, len+1, false); // include null-terminator\n}\n\nstatic void stats(struct conn *conn) {\n struct stats stats;\n stats_begin(&stats);\n stats_printf(&stats, \"pid %d\", getpid());\n stats_printf(&stats, \"uptime %.0f\", (sys_now()-procstart)/1e9);\n stats_printf(&stats, \"time %.0f\", sys_unixnow()/1e9);\n stats_printf(&stats, \"product %s\", \"pogocache\");\n stats_printf(&stats, \"version %s\", version);\n stats_printf(&stats, \"githash %s\", githash);\n stats_printf(&stats, \"pointer_size %zu\", sizeof(uintptr_t)*8);\n struct rusage usage;\n if (getrusage(RUSAGE_SELF, &usage) == 0) {\n stats_printf(&stats, \"rusage_user %ld.%06ld\",\n usage.ru_utime.tv_sec, usage.ru_utime.tv_usec);\n stats_printf(&stats, \"rusage_system %ld.%06ld\",\n usage.ru_stime.tv_sec, usage.ru_stime.tv_usec);\n }\n stats_printf(&stats, \"max_connections %zu\", maxconns);\n stats_printf(&stats, \"curr_connections %zu\", net_nconns());\n stats_printf(&stats, \"total_connections %zu\", net_tconns());\n stats_printf(&stats, \"rejected_connections %zu\", net_rconns());\n stats_printf(&stats, \"cmd_get %\" PRIu64, stat_cmd_get());\n 
stats_printf(&stats, \"cmd_set %\" PRIu64, stat_cmd_set());\n stats_printf(&stats, \"cmd_flush %\" PRIu64, stat_cmd_flush());\n stats_printf(&stats, \"cmd_touch %\" PRIu64, stat_cmd_touch());\n stats_printf(&stats, \"get_hits %\" PRIu64, stat_get_hits());\n stats_printf(&stats, \"get_misses %\" PRIu64, stat_get_misses());\n stats_printf(&stats, \"delete_misses %\" PRIu64, stat_delete_misses());\n stats_printf(&stats, \"delete_hits %\" PRIu64, stat_delete_hits());\n stats_printf(&stats, \"incr_misses %\" PRIu64, stat_incr_misses());\n stats_printf(&stats, \"incr_hits %\" PRIu64, stat_incr_hits());\n stats_printf(&stats, \"decr_misses %\" PRIu64, stat_decr_misses());\n stats_printf(&stats, \"decr_hits %\" PRIu64, stat_decr_hits());\n stats_printf(&stats, \"touch_hits %\" PRIu64, stat_touch_hits());\n stats_printf(&stats, \"touch_misses %\" PRIu64, stat_touch_misses());\n stats_printf(&stats, \"store_too_large %\" PRIu64, stat_store_too_large());\n stats_printf(&stats, \"store_no_memory %\" PRIu64, stat_store_no_memory());\n stats_printf(&stats, \"auth_cmds %\" PRIu64, stat_auth_cmds());\n stats_printf(&stats, \"auth_errors %\" PRIu64, stat_auth_errors());\n stats_printf(&stats, \"threads %d\", nthreads);\n struct sys_meminfo meminfo;\n sys_getmeminfo(&meminfo);\n stats_printf(&stats, \"rss %zu\", meminfo.rss);\n struct pogocache_size_opts sopts = { .entriesonly=true };\n stats_printf(&stats, \"bytes %zu\", pogocache_size(cache, &sopts));\n stats_printf(&stats, \"curr_items %zu\", pogocache_count(cache, 0));\n stats_printf(&stats, \"total_items %\" PRIu64, pogocache_total(cache, 0));\n stats_end(&stats, conn);\n}\n\nstatic void cmdSTATS(struct conn *conn, struct args *args) {\n if (args->len == 1) {\n return stats(conn);\n }\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\n// Commands hash table. 
Lazy loaded per thread.\n// Simple open addressing using case-insensitive fnv1a hashes.\nstatic int nbuckets;\nstatic struct cmd *buckets;\n\nstruct cmd {\n const char *name;\n void (*func)(struct conn *conn, struct args *args);\n};\n\nstatic struct cmd cmds[] = {\n { \"set\", cmdSET }, // pg\n { \"get\", cmdGET }, // pg\n { \"del\", cmdDEL }, // pg\n { \"mget\", cmdMGET }, // pg\n { \"mgets\", cmdMGET }, // pg cas detected\n { \"ttl\", cmdTTL }, // pg\n { \"pttl\", cmdTTL }, // pg\n { \"expire\", cmdEXPIRE }, // pg\n { \"setex\", cmdSETEX }, // pg\n { \"dbsize\", cmdDBSIZE }, // pg\n { \"quit\", cmdQUIT }, // pg\n { \"echo\", cmdECHO }, // pg\n { \"exists\", cmdEXISTS }, // pg\n { \"flushdb\", cmdFLUSHALL }, // pg\n { \"flushall\", cmdFLUSHALL }, // pg\n { \"flush\", cmdFLUSHALL }, // pg\n { \"purge\", cmdPURGE }, // pg\n { \"sweep\", cmdSWEEP }, // pg\n { \"keys\", cmdKEYS }, // pg\n { \"ping\", cmdPING }, // pg\n { \"touch\", cmdTOUCH }, // pg\n { \"debug\", cmdDEBUG }, // pg\n { \"incrby\", cmdINCRBY }, // pg\n { \"decrby\", cmdDECRBY }, // pg\n { \"incr\", cmdINCR }, // pg\n { \"decr\", cmdDECR }, // pg\n { \"uincrby\", cmdINCRBY }, // pg unsigned detected in signed operation\n { \"udecrby\", cmdDECRBY }, // pg unsigned detected in signed operation\n { \"uincr\", cmdINCR }, // pg unsigned detected in signed operation\n { \"udecr\", cmdDECR }, // pg unsigned detected in signed operation\n { \"append\", cmdAPPEND }, // pg\n { \"prepend\", cmdPREPEND }, // pg\n { \"auth\", cmdAUTH }, // pg\n { \"save\", cmdSAVELOAD }, // pg\n { \"load\", cmdSAVELOAD }, // pg\n { \"stats\", cmdSTATS }, // pg memcache style stats\n};\n\nstatic void build_commands_table(void) {\n static __thread bool buckets_ready = false;\n static pthread_mutex_t cmd_build_lock = PTHREAD_MUTEX_INITIALIZER;\n static bool built = false;\n if (!buckets_ready) {\n pthread_mutex_lock(&cmd_build_lock);\n if (!built) {\n int ncmds = sizeof(cmds)/sizeof(struct cmd);\n int n = ncmds*8;\n nbuckets = 2;\n 
while (nbuckets < n) {\n nbuckets *= 2;\n }\n buckets = xmalloc(nbuckets*sizeof(struct cmd));\n memset(buckets, 0, nbuckets*sizeof(struct cmd));\n uint64_t hash;\n for (int i = 0; i < ncmds; i++) {\n hash = fnv1a_case(cmds[i].name, strlen(cmds[i].name));\n for (int j = 0; j < nbuckets; j++) {\n int k = (j+hash)&(nbuckets-1);\n if (!buckets[k].name) {\n buckets[k] = cmds[i];\n break;\n }\n }\n }\n built = true;\n }\n pthread_mutex_unlock(&cmd_build_lock);\n buckets_ready = true;\n }\n}\n\nstatic struct cmd *get_cmd(const char *name, size_t namelen) {\n build_commands_table();\n uint32_t hash = fnv1a_case(name, namelen);\n int j = hash&(nbuckets-1);\n while (1) {\n if (!buckets[j].name) {\n return 0;\n }\n if (argeq_bytes(name, namelen, buckets[j].name)) {\n return &buckets[j];\n }\n j++;\n }\n}\n\nvoid evcommand(struct conn *conn, struct args *args) {\n if (useauth && !conn_auth(conn)) {\n if (conn_proto(conn) == PROTO_HTTP) {\n // Let HTTP traffic through.\n // The request has already been authorized in http.c\n } else {\n cmdAUTH(conn, args);\n return;\n }\n }\n if (verb > 1) {\n if (!argeq(args, 0, \"auth\")) {\n args_print(args);\n }\n }\n struct cmd *cmd = get_cmd(args->bufs[0].data, args->bufs[0].len);\n if (cmd) {\n cmd->func(conn, args);\n } else {\n if (verb > 0) {\n printf(\"# Unknown command '%.*s'\\n\", (int)args->bufs[0].len,\n args->bufs[0].data);\n }\n char errmsg[128];\n snprintf(errmsg, sizeof(errmsg), \"ERR unknown command '%.*s'\", \n (int)args->bufs[0].len, args->bufs[0].data);\n conn_write_error(conn, errmsg);\n }\n}\n"], ["/pogocache/src/main.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit main.c is the main entry point for the Pogocache program.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"conn.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"save.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"pogocache.h\"\n#include \"gitinfo.h\"\n#include \"uring.h\"\n\n// default user flags\nint nthreads = 0; // number of client threads\nchar *port = \"9401\"; // default tcp port (non-tls)\nchar *host = \"127.0.0.1\"; // default hostname or ip address\nchar *persist = \"\"; // file to load and save data to\nchar *unixsock = \"\"; // use a unix socket\nchar *reuseport = \"no\"; // reuse tcp port for other programs\nchar *tcpnodelay = \"yes\"; // disable nagle's algorithm\nchar *quickack = \"no\"; // enable quick acks\nchar *usecas = \"no\"; // enable compare and store\nchar *keepalive = \"yes\"; // socket keepalive setting\nint backlog = 1024; // network socket accept backlog\nint queuesize = 128; // event queue size\nchar *maxmemory = \"80%\"; // Maximum memory allowed - 80% total system\nchar *evict = \"yes\"; // evict keys when maxmemory reached\nint loadfactor = 75; // hashmap load factor\nchar *keysixpack = \"yes\"; // use sixpack compression on keys\nchar *trackallocs = \"no\"; // track allocations (for debugging)\nchar *auth = \"\"; // auth token or pa\nchar *tlsport = \"\"; // enable tls over tcp port\nchar *tlscertfile = \"\"; // tls cert file\nchar *tlskeyfile = \"\"; // tls key file\nchar *tlscacertfile = \"\"; // tls ca cert file\nchar *uring = \"yes\"; // use uring (linux only)\nint 
maxconns = 1024; // maximum number of sockets\nchar *noticker = \"no\";\nchar *warmup = \"yes\";\n\n// Global variables calculated in main().\n// These should never change during the lifetime of the process.\n// Other source files must use the \"extern const\" specifier.\nchar *version;\nchar *githash;\nuint64_t seed;\nsize_t sysmem;\nsize_t memlimit;\nint verb; // verbosity, 0=no, 1=verbose, 2=very, 3=extremely\nbool usesixpack;\nint useallocator;\nbool usetrackallocs;\nbool useevict;\nint nshards;\nbool usetls; // use tls security (pemfile required);\nbool useauth; // use auth password\nbool usecolor; // allow color in terminal\nchar *useid; // instance id (unique to every process run)\nint64_t procstart; // proc start boot time, for uptime stat\n\n// Global atomic variable. These are safe to read and modify by other source\n// files, as long as those sources use \"atomic_\" methods.\natomic_int shutdownreq; // shutdown request counter\natomic_int_fast64_t flush_delay; // delay in seconds to next async flushall\natomic_bool sweep; // mark for async sweep, asap\natomic_bool registered; // registration is active\natomic_bool lowmem; // system is in low memory mode.\n\nstruct pogocache *cache;\n\n// min max robinhood load factor (75% performs pretty well)\n#define MINLOADFACTOR_RH 55\n#define MAXLOADFACTOR_RH 95\n\nstatic void ready(void *udata) {\n (void)udata;\n printf(\"* Ready to accept connections\\n\");\n}\n\n#define noopt \"%s\"\n\n#define HELP(format, ...) \\\n fprintf(file, format, ##__VA_ARGS__)\n\n#define HOPT(opt, desc, format, ...) 
\\\n fprintf(file, \" \"); \\\n fprintf(file, \"%-22s \", opt); \\\n fprintf(file, \"%-30s \", desc); \\\n if (strcmp(format, noopt) != 0) { \\\n fprintf(file, \"(default: \" format \")\", ##__VA_ARGS__); \\\n } \\\n fprintf(file, \"\\n\");\n\nstatic int calc_nshards(int nprocs) {\n switch (nprocs) {\n case 1: return 64;\n case 2: return 128;\n case 3: return 256;\n case 4: return 512;\n case 5: return 1024;\n case 6: return 2048;\n default: return 4096;\n }\n}\n\nstatic void showhelp(FILE *file) {\n int nprocs = sys_nprocs();\n int nshards = calc_nshards(nprocs);\n\n HELP(\"Usage: %s [options]\\n\", \"pogocache\");\n HELP(\"\\n\");\n\n HELP(\"Basic options:\\n\");\n HOPT(\"-h hostname\", \"listening host\", \"%s\", host);\n HOPT(\"-p port\", \"listening port\", \"%s\", port);\n HOPT(\"-s socket\", \"unix socket file\", \"%s\", *unixsock?unixsock:\"none\");\n\n HOPT(\"-v,-vv,-vvv\", \"verbose logging level\", noopt, \"\");\n HELP(\"\\n\");\n \n HELP(\"Additional options:\\n\");\n HOPT(\"--threads count\", \"number of threads\", \"%d\", nprocs);\n HOPT(\"--maxmemory value\", \"set max memory usage\", \"%s\", maxmemory);\n HOPT(\"--evict yes/no\", \"evict keys at maxmemory\", \"%s\", evict);\n HOPT(\"--persist path\", \"persistence file\", \"%s\", *persist?persist:\"none\");\n HOPT(\"--maxconns conns\", \"maximum connections\", \"%d\", maxconns);\n HELP(\"\\n\");\n \n HELP(\"Security options:\\n\");\n HOPT(\"--auth passwd\", \"auth token or password\", \"%s\", *auth?auth:\"none\");\n#ifndef NOOPENSSL\n HOPT(\"--tlsport port\", \"enable tls on port\", \"%s\", \"none\");\n HOPT(\"--tlscert certfile\", \"tls cert file\", \"%s\", \"none\");\n HOPT(\"--tlskey keyfile\", \"tls key file\", \"%s\", \"none\");\n HOPT(\"--tlscacert cacertfile\", \"tls ca-cert file\", \"%s\", \"none\");\n#endif\n HELP(\"\\n\");\n\n HELP(\"Advanced options:\\n\");\n HOPT(\"--shards count\", \"number of shards\", \"%d\", nshards);\n HOPT(\"--backlog count\", \"accept backlog\", \"%d\", 
backlog);\n HOPT(\"--queuesize count\", \"event queuesize size\", \"%d\", queuesize);\n HOPT(\"--reuseport yes/no\", \"reuseport for tcp\", \"%s\", reuseport);\n HOPT(\"--tcpnodelay yes/no\", \"disable nagle's algo\", \"%s\", tcpnodelay);\n HOPT(\"--quickack yes/no\", \"use quickack (linux)\", \"%s\", quickack);\n HOPT(\"--uring yes/no\", \"use uring (linux)\", \"%s\", uring);\n HOPT(\"--loadfactor percent\", \"hashmap load factor\", \"%d\", loadfactor);\n HOPT(\"--keysixpack yes/no\", \"sixpack compress keys\", \"%s\", keysixpack);\n HOPT(\"--cas yes/no\", \"use compare and store\", \"%s\", usecas);\n HELP(\"\\n\");\n}\n\nstatic void showversion(FILE *file) {\n#ifdef CCSANI\n fprintf(file, \"pogocache %s (CCSANI)\\n\", version);\n#else\n fprintf(file, \"pogocache %s\\n\", version);\n#endif\n}\n\nstatic size_t calc_memlimit(char *maxmemory) {\n if (strcmp(maxmemory, \"unlimited\") == 0) {\n return SIZE_MAX;\n }\n char *oval = maxmemory;\n while (isspace(*maxmemory)) {\n maxmemory++;\n }\n char *end;\n errno = 0;\n double mem = strtod(maxmemory, &end);\n if (errno || !(mem > 0) || !isfinite(mem)) {\n goto fail;\n }\n while (isspace(*end)) {\n end++;\n }\n #define exteq(c) \\\n (tolower(end[0])==c&& (!end[1]||(tolower(end[1])=='b'&&!end[2])))\n\n if (strcmp(end, \"\") == 0) {\n return mem;\n } else if (strcmp(end, \"%\") == 0) {\n return (((double)mem)/100.0) * sysmem;\n } else if (exteq('k')) {\n return mem*1024.0;\n } else if (exteq('m')) {\n return mem*1024.0*1024.0;\n } else if (exteq('g')) {\n return mem*1024.0*1024.0*1024.0;\n } else if (exteq('t')) {\n return mem*1024.0*1024.0*1024.0*1024.0;\n }\nfail:\n fprintf(stderr, \"# Invalid maxmemory '%s'\\n\", oval);\n showhelp(stderr);\n exit(1);\n}\n\nstatic size_t setmaxrlimit(void) {\n size_t maxconns = 0;\n struct rlimit rl;\n if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {\n maxconns = rl.rlim_max;\n rl.rlim_cur = rl.rlim_max;\n rl.rlim_max = rl.rlim_max;\n if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {\n perror(\"# 
setrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n } else {\n perror(\"# getrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n return maxconns;\n}\n\nstatic void evicted(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)value, (void)valuelen, (void)expires, (void)udata;\n return;\n printf(\". evicted shard=%d, reason=%d, time=%\" PRIi64 \", key='%.*s'\"\n \", flags=%\" PRIu32 \", cas=%\" PRIu64 \"\\n\",\n shard, reason, time, (int)keylen, (char*)key, flags, cas);\n}\n\n#define BEGIN_FLAGS() \\\n if (0) {\n#define BFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option %s missing value\\n\", opt); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n }\n#define TFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n if (!dryrun) { \\\n op; \\\n }\n#define AFLAG(name, op) \\\n } else if (strcmp(argv[i], \"--\" name) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option --%s missing value\\n\", name); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n } \\\n } else if (strstr(argv[i], \"--\" name \"=\") == argv[i]) { \\\n if (!dryrun) { \\\n char *flag = argv[i]+strlen(name)+3; op; \\\n }\n#define END_FLAGS() \\\n } else { \\\n fprintf(stderr, \"# Unknown program option %s\\n\", argv[i]); \\\n exit(1); \\\n }\n\n#define INVALID_FLAG(name, value) \\\n fprintf(stderr, \"# Option --%s is invalid\\n\", name); \\\n exit(1);\n\nstatic atomic_bool loaded = false;\n\nvoid sigterm(int sig) {\n if (sig == SIGINT || sig == SIGTERM) {\n if (!atomic_load(&loaded) || !*persist) {\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n if (*persist) {\n printf(\"* Saving data to %s, please wait...\\n\", persist);\n int ret = save(persist, true);\n if (ret != 0) {\n perror(\"# Save failed\");\n _Exit(1);\n }\n printf(\"# 
Pogocache exiting now\\n\");\n _Exit(0);\n }\n\n int count = atomic_fetch_add(&shutdownreq, 1);\n if (count > 0 && sig == SIGINT) {\n printf(\"# User forced shutdown\\n\");\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n }\n}\n\nstatic void tick(void) {\n if (!atomic_load_explicit(&loaded, __ATOMIC_ACQUIRE)) {\n return;\n }\n // Memory usage check\n if (memlimit < SIZE_MAX) {\n struct sys_meminfo meminfo;\n sys_getmeminfo(&meminfo);\n size_t memusage = meminfo.rss;\n if (!lowmem) {\n if (memusage > memlimit) {\n atomic_store(&lowmem, true);\n if (verb > 0) {\n printf(\"# Low memory mode on\\n\");\n }\n }\n } else {\n if (memusage < memlimit) {\n atomic_store(&lowmem, false);\n if (verb > 0) {\n printf(\"# Low memory mode off\\n\");\n }\n }\n }\n }\n\n // Print allocations to terminal.\n if (usetrackallocs) {\n printf(\". keys=%zu, allocs=%zu, conns=%zu\\n\",\n pogocache_count(cache, 0), xallocs(), net_nconns());\n }\n\n}\n\nstatic void *ticker(void *arg) {\n (void)arg;\n while (1) {\n tick();\n sleep(1);\n }\n return 0;\n}\n\nstatic void listening(void *udata) {\n (void)udata;\n printf(\"* Network listener established\\n\");\n if (*persist) {\n if (!cleanwork(persist)) {\n // An error message has already been printed\n _Exit(0);\n }\n if (access(persist, F_OK) == 0) {\n printf(\"* Loading data from %s, please wait...\\n\", persist);\n struct load_stats stats;\n int64_t start = sys_now();\n int ret = load(persist, true, &stats);\n if (ret != 0) {\n perror(\"# Load failed\");\n _Exit(1);\n }\n double elapsed = (sys_now()-start)/1e9;\n printf(\"* Loaded %zu entries (%zu expired) (%.3f MB in %.3f secs) \"\n \"(%.0f entries/sec, %.0f MB/sec) \\n\", \n stats.ninserted, stats.nexpired,\n stats.csize/1024.0/1024.0, elapsed, \n (stats.ninserted+stats.nexpired)/elapsed, \n stats.csize/1024.0/1024.0/elapsed);\n }\n }\n atomic_store(&loaded, true);\n}\n\nstatic void yield(void *udata) {\n (void)udata;\n sched_yield();\n}\n\nint main(int argc, char *argv[]) {\n 
procstart = sys_now();\n\n // Intercept signals\n signal(SIGPIPE, SIG_IGN);\n signal(SIGINT, sigterm);\n signal(SIGTERM, sigterm);\n\n // Line buffer logging so pipes will stream.\n setvbuf(stdout, 0, _IOLBF, 0);\n setvbuf(stderr, 0, _IOLBF, 0);\n char guseid[17];\n memset(guseid, 0, 17);\n useid = guseid;\n sys_genuseid(useid); \n const char *maxmemorymb = 0;\n seed = sys_seed();\n verb = 0;\n usetls = false;\n useauth = false;\n lowmem = false;\n version = GITVERS;\n githash = GITHASH;\n\n \n\n\n if (uring_available()) {\n uring = \"yes\";\n } else {\n uring = \"no\";\n }\n\n atomic_init(&shutdownreq, 0);\n atomic_init(&flush_delay, 0);\n atomic_init(&sweep, false);\n atomic_init(®istered, false);\n\n // Parse program flags\n for (int ii = 0; ii < 2; ii++) {\n bool dryrun = ii == 0;\n for (int i = 1; i < argc; i++) {\n if (strcmp(argv[i], \"--help\") == 0) {\n showhelp(stdout);\n exit(0);\n }\n if (strcmp(argv[i], \"--version\") == 0) {\n showversion(stdout);\n exit(0);\n }\n BEGIN_FLAGS()\n BFLAG(\"-p\", port = flag)\n BFLAG(\"-h\", host = flag)\n BFLAG(\"-s\", unixsock = flag)\n TFLAG(\"-v\", verb = 1)\n TFLAG(\"-vv\", verb = 2)\n TFLAG(\"-vvv\", verb = 3)\n AFLAG(\"port\", port = flag)\n AFLAG(\"threads\", nthreads = atoi(flag))\n AFLAG(\"shards\", nshards = atoi(flag))\n AFLAG(\"backlog\", backlog = atoi(flag))\n AFLAG(\"queuesize\", queuesize = atoi(flag))\n AFLAG(\"maxmemory\", maxmemory = flag)\n AFLAG(\"evict\", evict = flag)\n AFLAG(\"reuseport\", reuseport = flag)\n AFLAG(\"uring\", uring = flag)\n AFLAG(\"tcpnodelay\", tcpnodelay = flag)\n AFLAG(\"keepalive\", keepalive = flag)\n AFLAG(\"quickack\", quickack = flag)\n AFLAG(\"trackallocs\", trackallocs = flag)\n AFLAG(\"cas\", usecas = flag)\n AFLAG(\"maxconns\", maxconns = atoi(flag))\n AFLAG(\"loadfactor\", loadfactor = atoi(flag))\n AFLAG(\"sixpack\", keysixpack = flag)\n AFLAG(\"seed\", seed = strtoull(flag, 0, 10))\n AFLAG(\"auth\", auth = flag)\n AFLAG(\"persist\", persist = flag)\n 
AFLAG(\"noticker\", noticker = flag)\n AFLAG(\"warmup\", warmup = flag)\n#ifndef NOOPENSSL\n // TLS flags\n AFLAG(\"tlsport\", tlsport = flag)\n AFLAG(\"tlscert\", tlscertfile = flag)\n AFLAG(\"tlscacert\", tlscacertfile = flag)\n AFLAG(\"tlskey\", tlskeyfile = flag)\n#endif\n // Hidden or alternative flags\n BFLAG(\"-t\", nthreads = atoi(flag)) // --threads=\n BFLAG(\"-m\", maxmemorymb = flag) // --maxmemory=M\n TFLAG(\"-M\", evict = \"no\") // --evict=no\n END_FLAGS()\n }\n }\n\n usecolor = isatty(fileno(stdout));\n\n if (strcmp(evict, \"yes\") == 0) {\n useevict = true;\n } else if (strcmp(evict, \"no\") == 0) {\n useevict = false;\n } else {\n INVALID_FLAG(\"evict\", evict);\n }\n\n bool usereuseport;\n if (strcmp(reuseport, \"yes\") == 0) {\n usereuseport = true;\n } else if (strcmp(reuseport, \"no\") == 0) {\n usereuseport = false;\n } else {\n INVALID_FLAG(\"reuseport\", reuseport);\n }\n\n if (strcmp(trackallocs, \"yes\") == 0) {\n usetrackallocs = true;\n } else if (strcmp(trackallocs, \"no\") == 0) {\n usetrackallocs = false;\n } else {\n INVALID_FLAG(\"trackallocs\", trackallocs);\n }\n\n bool usetcpnodelay;\n if (strcmp(tcpnodelay, \"yes\") == 0) {\n usetcpnodelay = true;\n } else if (strcmp(tcpnodelay, \"no\") == 0) {\n usetcpnodelay = false;\n } else {\n INVALID_FLAG(\"tcpnodelay\", tcpnodelay);\n }\n\n bool usekeepalive;\n if (strcmp(keepalive, \"yes\") == 0) {\n usekeepalive = true;\n } else if (strcmp(keepalive, \"no\") == 0) {\n usekeepalive = false;\n } else {\n INVALID_FLAG(\"keepalive\", keepalive);\n }\n\n\n bool usecasflag;\n if (strcmp(usecas, \"yes\") == 0) {\n usecasflag = true;\n } else if (strcmp(usecas, \"no\") == 0) {\n usecasflag = false;\n } else {\n INVALID_FLAG(\"usecas\", usecas);\n }\n\n if (maxconns <= 0) {\n maxconns = 1024;\n }\n\n\n#ifndef __linux__\n bool useuring = false;\n#else\n bool useuring;\n if (strcmp(uring, \"yes\") == 0) {\n useuring = true;\n } else if (strcmp(uring, \"no\") == 0) {\n useuring = false;\n } else 
{\n INVALID_FLAG(\"uring\", uring);\n }\n if (useuring) {\n if (!uring_available()) {\n useuring = false;\n }\n }\n#endif\n\n#ifndef __linux__\n quickack = \"no\";\n#endif\n bool usequickack;\n if (strcmp(quickack, \"yes\") == 0) {\n usequickack = true;\n } else if (strcmp(quickack, \"no\") == 0) {\n usequickack = false;\n } else {\n INVALID_FLAG(\"quickack\", quickack);\n }\n\n if (strcmp(keysixpack, \"yes\") == 0) {\n usesixpack = true;\n } else if (strcmp(keysixpack, \"no\") == 0) {\n usesixpack = false;\n } else {\n INVALID_FLAG(\"sixpack\", keysixpack);\n }\n\n // Threads\n if (nthreads <= 0) {\n nthreads = sys_nprocs();\n } else if (nthreads > 4096) {\n nthreads = 4096; \n }\n\n if (nshards == 0) {\n nshards = calc_nshards(nthreads);\n }\n if (nshards <= 0 || nshards > 65536) {\n nshards = 65536;\n }\n\n if (loadfactor < MINLOADFACTOR_RH) {\n loadfactor = MINLOADFACTOR_RH;\n printf(\"# loadfactor minumum set to %d\\n\", MINLOADFACTOR_RH);\n } else if (loadfactor > MAXLOADFACTOR_RH) {\n loadfactor = MAXLOADFACTOR_RH;\n printf(\"# loadfactor maximum set to %d\\n\", MAXLOADFACTOR_RH);\n }\n\n if (queuesize < 1) {\n queuesize = 1;\n printf(\"# queuesize adjusted to 1\\n\");\n } else if (queuesize > 4096) {\n queuesize = 4096;\n printf(\"# queuesize adjusted to 4096\\n\");\n }\n\n if (maxmemorymb) {\n size_t sz = strlen(maxmemorymb)+2;\n char *str = xmalloc(sz);\n snprintf(str, sz, \"%sM\", maxmemorymb);\n maxmemory = str;\n }\n\n if (!*port || strcmp(port, \"0\") == 0) {\n port = \"\";\n }\n\n if (!*tlsport || strcmp(tlsport, \"0\") == 0) {\n usetls = false;\n tlsport = \"\";\n } else {\n usetls = true;\n tls_init();\n }\n\n if (*auth) {\n useauth = true;\n }\n setmaxrlimit();\n sysmem = sys_memory();\n memlimit = calc_memlimit(maxmemory);\n\n if (memlimit == SIZE_MAX) {\n evict = \"no\";\n useevict = false;\n }\n\n struct pogocache_opts opts = {\n .yield = yield,\n .seed = seed,\n .malloc = xmalloc,\n .free = xfree,\n .nshards = nshards,\n .loadfactor = 
loadfactor,\n .usecas = usecasflag,\n .evicted = evicted,\n .allowshrink = true,\n .usethreadbatch = true,\n };\n // opts.yield = 0;\n\n cache = pogocache_new(&opts);\n if (!cache) {\n perror(\"pogocache_new\");\n abort();\n }\n\n // Print the program details\n printf(\"* Pogocache (pid: %d, arch: %s%s, version: %s, git: %s)\\n\",\n getpid(), sys_arch(), sizeof(uintptr_t)==4?\", mode: 32-bit\":\"\", version,\n githash);\n char buf0[64], buf1[64];\n char buf2[64];\n if (memlimit < SIZE_MAX) {\n snprintf(buf2, sizeof(buf2), \"%.0f%%/%s\", (double)memlimit/sysmem*100.0,\n memstr(memlimit, buf1));\n } else {\n strcpy(buf2, \"unlimited\");\n }\n printf(\"* Memory (system: %s, max: %s, evict: %s)\\n\", memstr(sysmem, buf0),\n buf2, evict);\n printf(\"* Features (verbosity: %s, sixpack: %s, cas: %s, persist: %s, \"\n \"uring: %s)\\n\",\n verb==0?\"normal\":verb==1?\"verbose\":verb==2?\"very\":\"extremely\",\n keysixpack, usecas, *persist?persist:\"none\", useuring?\"yes\":\"no\");\n char tcp_addr[256];\n snprintf(tcp_addr, sizeof(tcp_addr), \"%s:%s\", host, port);\n printf(\"* Network (port: %s, unixsocket: %s, backlog: %d, reuseport: %s, \"\n \"maxconns: %d)\\n\", *port?port:\"none\", *unixsock?unixsock:\"none\",\n backlog, reuseport, maxconns);\n printf(\"* Socket (tcpnodelay: %s, keepalive: %s, quickack: %s)\\n\",\n tcpnodelay, keepalive, quickack);\n printf(\"* Threads (threads: %d, queuesize: %d)\\n\", nthreads, queuesize);\n printf(\"* Shards (shards: %d, loadfactor: %d%%)\\n\", nshards, loadfactor);\n printf(\"* Security (auth: %s, tlsport: %s)\\n\", \n strlen(auth)>0?\"enabled\":\"disabled\", *tlsport?tlsport:\"none\");\n if (strcmp(noticker,\"yes\") == 0) {\n printf(\"# NO TICKER\\n\");\n } else {\n pthread_t th;\n int ret = pthread_create(&th, 0, ticker, 0);\n if (ret == -1) {\n perror(\"# pthread_create(ticker)\");\n exit(1);\n }\n }\n#ifdef DATASETOK\n printf(\"# DATASETOK\\n\");\n#endif\n#ifdef CMDGETNIL\n printf(\"# CMDGETNIL\\n\");\n#endif\n#ifdef 
CMDSETOK\n printf(\"# CMDSETOK\\n\");\n#endif\n#ifdef ENABLELOADREAD\n printf(\"# ENABLELOADREAD\\n\");\n#endif\n struct net_opts nopts = {\n .host = host,\n .port = port,\n .tlsport = tlsport,\n .unixsock = unixsock,\n .reuseport = usereuseport,\n .tcpnodelay = usetcpnodelay,\n .keepalive = usekeepalive,\n .quickack = usequickack,\n .backlog = backlog,\n .queuesize = queuesize,\n .nthreads = nthreads,\n .nowarmup = strcmp(warmup, \"no\") == 0,\n .nouring = !useuring,\n .listening = listening,\n .ready = ready,\n .data = evdata,\n .opened = evopened,\n .closed = evclosed,\n .maxconns = maxconns,\n };\n net_main(&nopts);\n return 0;\n}\n"], ["/pogocache/src/util.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit util.c provides various utilities and convenience functions.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n\n// Performs a case-insenstive equality test between the byte slice 'data' and\n// a c-string. It's expected that c-string is already lowercase and \n// null-terminated. 
The data does not need to be null-terminated.\nbool argeq_bytes(const void *data, size_t datalen, const char *cstr) {\n const char *p = data;\n const char *e = p+datalen;\n bool eq = true;\n while (eq && p < e && *cstr) {\n eq = tolower(*p) == *cstr;\n p++;\n cstr++;\n }\n return eq && *cstr == '\\0' && p == e;\n}\n\nbool argeq(struct args *args, int idx, const char *cstr) {\n return argeq_bytes(args->bufs[idx].data, args->bufs[idx].len, cstr);\n}\n\n// Safely adds two int64_t values and with clamping on overflow.\nint64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n// Safely multiplies two int64_t values and with clamping on overflow.\nint64_t int64_mul_clamp(int64_t a, int64_t b) {\n if (a || b) {\n if (a > 0) {\n if (b > 0 && a > INT64_MAX / b) {\n return INT64_MAX;\n } else if (b < 0 && b < INT64_MIN / a) {\n return INT64_MIN;\n }\n } else {\n if (b > 0 && a < INT64_MIN / b) {\n return INT64_MIN;\n } else if (b < 0 && a < INT64_MAX / b) {\n return INT64_MAX;\n }\n }\n }\n return a * b;\n}\n\n/// https://github.com/tidwall/varint.c\nint varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nint varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\nint varint_write_i64(void *data, int64_t x) {\n uint64_t ux = (uint64_t)x << 1;\n ux = x < 0 ? 
~ux : ux;\n return varint_write_u64(data, ux);\n}\n\nint varint_read_i64(const void *data, size_t len, int64_t *x) {\n uint64_t ux;\n int n = varint_read_u64(data, len, &ux);\n *x = (int64_t)(ux >> 1);\n *x = ux&1 ? ~*x : *x;\n return n;\n}\n\n\nconst char *memstr(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0fB\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fK\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fM\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1fG\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0G\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0M\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0K\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\nconst char *memstr_long(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0f bytes\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f KB\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f MB\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1f GB\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0 GB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 MB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 KB\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nuint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\nuint64_t rand_next(uint64_t *seed) {\n // pcg + mix13\n *seed = (*seed * UINT64_C(6364136223846793005)) + 1;\n return mix13(*seed);\n}\n\nvoid write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = 
(x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n bytes[4] = (x>>32)&0xFF;\n bytes[5] = (x>>40)&0xFF;\n bytes[6] = (x>>48)&0xFF;\n bytes[7] = (x>>56)&0xFF;\n}\n\nuint64_t read_u64(const void *data) {\n const uint8_t *bytes = data;\n uint64_t x = 0;\n x |= ((uint64_t)bytes[0])<<0;\n x |= ((uint64_t)bytes[1])<<8;\n x |= ((uint64_t)bytes[2])<<16;\n x |= ((uint64_t)bytes[3])<<24;\n x |= ((uint64_t)bytes[4])<<32;\n x |= ((uint64_t)bytes[5])<<40;\n x |= ((uint64_t)bytes[6])<<48;\n x |= ((uint64_t)bytes[7])<<56;\n return x;\n}\n\nvoid write_u32(void *data, uint32_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n}\n\nuint32_t read_u32(const void *data) {\n const uint8_t *bytes = data;\n uint32_t x = 0;\n x |= ((uint32_t)bytes[0])<<0;\n x |= ((uint32_t)bytes[1])<<8;\n x |= ((uint32_t)bytes[2])<<16;\n x |= ((uint32_t)bytes[3])<<24;\n return x;\n}\n\n// https://www.w3.org/TR/2003/REC-PNG-20031110/#D-CRCAppendix\nuint32_t crc32(const void *data, size_t len) {\n static __thread uint32_t table[256];\n static __thread bool computed = false;\n if (!computed) {\n for (uint32_t n = 0; n < 256; n++) {\n uint32_t c = n;\n for (int k = 0; k < 8; k++) {\n c = (c&1)?0xedb88320L^(c>>1):c>>1;\n }\n table[n] = c;\n }\n computed = true;\n }\n uint32_t crc = ~0;\n const uint8_t *buf = data;\n for (size_t n = 0; n < len; n++) {\n crc = table[(crc^buf[n])&0xff]^(crc>>8);\n }\n return ~crc;\n}\n\n// Attempts to read exactly len bytes from file stream\n// Returns the number of bytes read. 
Anything less than len means the stream\n// was closed or an error occured while reading.\n// Return -1 if no bytes were read and there was an error.\nssize_t read_full(int fd, void *data, size_t len) {\n uint8_t *bytes = data;\n size_t total = 0;\n while (len > 0) {\n ssize_t n = read(fd, bytes+total, len);\n if (n <= 0) {\n if (total > 0) {\n break;\n }\n return n;\n }\n len -= n;\n total += n;\n }\n return total;\n}\n\nsize_t u64toa(uint64_t x, uint8_t *data) {\n if (x < 10) {\n data[0] = '0'+x;\n return 1;\n }\n size_t i = 0;\n do {\n data[i++] = '0' + x % 10;\n } while ((x /= 10) > 0);\n // reverse the characters\n for (size_t j = 0, k = i-1; j < k; j++, k--) {\n uint8_t ch = data[j];\n data[j] = data[k];\n data[k] = ch;\n }\n return i;\n}\n\nsize_t i64toa(int64_t x, uint8_t *data) {\n if (x < 0) {\n data[0] = '-';\n data++;\n return u64toa(x * -1, data) + 1;\n }\n return u64toa(x, data);\n}\n\nuint32_t fnv1a_case(const char* buf, size_t len) {\n uint32_t hash = 0x811c9dc5;\n for (size_t i = 0; i < len; i++) {\n hash = (hash ^ tolower(buf[i])) * 0x01000193;\n }\n\treturn hash;\n}\n\nbool parse_i64(const char *data, size_t len, int64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n errno = 0;\n char *end;\n *x = strtoll(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool parse_u64(const char *data, size_t len, uint64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n if (buf[0] == '-') {\n return false;\n }\n errno = 0;\n char *end;\n *x = strtoull(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool argi64(struct args *args, int idx, int64_t *x) {\n return parse_i64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nbool argu64(struct args *args, int idx, uint64_t *x) {\n return parse_u64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nvoid *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t 
uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\nvoid store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// Increment a morris counter. The counter is clipped to 31 bits\nuint8_t morris_incr(uint8_t morris, uint64_t rand) {\n return morris>=31?31:morris+!(rand&((UINT64_C(1)< '~') {\n printf(\"\\\\x%02x\", c);\n } else {\n printf(\"%c\", c);\n }\n }\n}\n"], ["/pogocache/src/save.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit save.c provides an interface for saving and loading Pogocache\n// data files.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"pogocache.h\"\n#include \"buf.h\"\n#include \"util.h\"\n#include \"lz4.h\"\n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#define BLOCKSIZE 1048576\n#define COMPRESS\n\nextern struct pogocache *cache;\nextern const int verb;\n\nstruct savectx {\n pthread_t th; // work thread\n int index; // thread index\n pthread_mutex_t *lock; // write lock\n int fd; // work file descriptor\n int start; // current shard\n int count; // number of shards to process\n struct buf buf; // block buffer\n bool ok; // final ok\n int errnum; // final errno status\n struct buf dst; // compressed buffer space\n size_t nentries; // number of entried in block buffer\n};\n\nstatic int flush(struct savectx *ctx) {\n if (ctx->nentries == 0) {\n ctx->buf.len = 0;\n return 0;\n }\n // Make sure that there's enough space in the dst buffer to store the\n // header (16 bytes) and the compressed data.\n size_t bounds = LZ4_compressBound(ctx->buf.len);\n buf_ensure(&ctx->dst, 16+bounds);\n // Compress the block\n uint32_t len = LZ4_compress_default((char*)ctx->buf.data, \n (char*)ctx->dst.data+16, ctx->buf.len, bounds);\n // The block is now compressed.\n // Genreate a checksum of the compressed data.\n uint32_t crc = crc32(ctx->dst.data+16, len);\n // Write the 16 byte header\n // (0-3) 'POGO' tag\n memcpy(ctx->dst.data, \"POGO\", 4);\n // (4-7) Checksum\n write_u32(ctx->dst.data+4, crc);\n // (8-11) Len of decompressed data \n write_u32(ctx->dst.data+8, ctx->buf.len);\n // 
(12-15) Len of compressed data \n write_u32(ctx->dst.data+12, len);\n // The rest of the dst buffer contains the compressed bytes\n uint8_t *p = (uint8_t*)ctx->dst.data;\n uint8_t *end = p + len+16;\n bool ok = true;\n pthread_mutex_lock(ctx->lock);\n while (p < end) {\n ssize_t n = write(ctx->fd, p, end-p);\n if (n < 0) {\n ok = false;\n break;\n }\n p += n;\n }\n pthread_mutex_unlock(ctx->lock);\n ctx->buf.len = 0;\n ctx->nentries = 0;\n return ok ? 0 : -1;\n};\n\nstatic int save_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard;\n struct savectx *ctx = udata;\n buf_append_byte(&ctx->buf, 0); // entry type. zero=k/v string pair;\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n buf_append_uvarint(&ctx->buf, valuelen);\n buf_append(&ctx->buf, value, valuelen);\n if (expires > 0) {\n int64_t ttl = expires-time;\n assert(ttl > 0);\n buf_append_uvarint(&ctx->buf, ttl);\n } else {\n buf_append_uvarint(&ctx->buf, 0);\n }\n buf_append_uvarint(&ctx->buf, flags);\n buf_append_uvarint(&ctx->buf, cas);\n ctx->nentries++;\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void *thsave(void *arg) {\n struct savectx *ctx = arg;\n for (int i = 0; i < ctx->count; i++) {\n int shardidx = ctx->start+i;\n struct pogocache_iter_opts opts = {\n .oneshard = true,\n .oneshardidx = shardidx,\n .time = sys_now(),\n .entry = save_entry,\n .udata = ctx,\n };\n // write the unix timestamp before entries\n buf_append_uvarint(&ctx->buf, sys_unixnow());\n int status = pogocache_iter(cache, &opts);\n if (status == POGOCACHE_CANCELED) {\n goto done;\n }\n if (flush(ctx) == -1) {\n goto done;\n }\n }\n ctx->ok = true;\ndone:\n buf_clear(&ctx->buf);\n buf_clear(&ctx->dst);\n ctx->errnum = errno;\n return 0;\n}\n\nint save(const char *path, bool fast) {\n uint64_t seed = sys_seed();\n size_t psize = strlen(path)+32;\n char *workpath = 
xmalloc(psize);\n snprintf(workpath, psize, \"%s.%08x.pogocache.work\", path, \n (int)(seed%INT_MAX));\n if (verb > 1) {\n printf(\". Saving to work file %s\\n\", workpath);\n }\n int fd = open(workpath, O_RDWR|O_CREAT, S_IRUSR|S_IRGRP|S_IROTH);\n if (fd == -1) {\n return -1;\n }\n int nshards = pogocache_nshards(cache);\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n if (!fast) {\n nprocs = 1;\n }\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n struct savectx *ctxs = xmalloc(nprocs*sizeof(struct savectx));\n memset(ctxs, 0, nprocs*sizeof(struct savectx));\n bool ok = false;\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n ctx->index = i;\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->fd = fd;\n ctx->lock = &lock;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (nprocs > 1) {\n if (pthread_create(&ctx->th, 0, thsave, ctx) == -1) {\n ctx->th = 0;\n }\n }\n start += ctx->count;\n }\n // execute operations on failed threads (or fast=false)\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thsave(ctx);\n }\n }\n // wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n // check for any failures\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (!ctx->ok) {\n errno = ctx->errnum;\n goto done;\n }\n }\n // Move file work file to final path\n if (rename(workpath, path) == -1) {\n goto done;\n }\n ok = true;\ndone:\n close(fd);\n unlink(workpath);\n xfree(workpath);\n xfree(ctxs);\n return ok ? 
0 : -1;\n}\n\n// compressed block\nstruct cblock {\n struct buf cdata; // compressed data\n size_t dlen; // decompressed size\n};\n\nstruct loadctx {\n pthread_t th;\n\n // shared context\n pthread_mutex_t *lock;\n pthread_cond_t *cond;\n bool *donereading; // shared done flag\n int *nblocks; // number of blocks in queue\n struct cblock *blocks; // the block queue\n bool *failure; // a thread will set this upon error\n\n // thread status\n atomic_bool ok;\n int errnum;\n size_t ninserted;\n size_t nexpired;\n};\n\nstatic bool load_block(struct cblock *block, struct loadctx *ctx) {\n (void)ctx;\n bool ok = false;\n\n int64_t now = sys_now();\n int64_t unixnow = sys_unixnow();\n\n // decompress block\n char *ddata = xmalloc(block->dlen);\n int ret = LZ4_decompress_safe(block->cdata.data, ddata, block->cdata.len, \n block->dlen);\n if (ret < 0 || (size_t)ret != block->dlen) {\n printf(\". bad compressed block\\n\");\n goto done;\n }\n buf_clear(&block->cdata);\n uint8_t *p = (void*)ddata;\n uint8_t *e = p + block->dlen;\n\n int n;\n uint64_t x;\n // read unix time\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n printf(\". bad unix time\\n\");\n goto done;\n }\n p += n;\n\n int64_t unixtime = x;\n // printf(\". unixtime=%lld\\n\", unixtime);\n\n // Read each entry from decompressed data\n while (e > p) {\n /////////////////////\n // kind\n uint8_t kind = *(p++);\n \n if (kind != 0) {\n // only k/v strings allowed at this time.\n printf(\">> %d\\n\", kind);\n printf(\". 
unknown kind\\n\");\n goto done;\n }\n /////////////////////\n // key\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t keylen = x;\n if ((size_t)(e-p) < keylen) {\n goto done;\n }\n const uint8_t *key = p;\n p += keylen;\n /////////////////////\n // val\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t vallen = x;\n if ((size_t)(e-p) < vallen) {\n goto done;\n }\n const uint8_t *val = p;\n p += vallen;\n /////////////////////\n // ttl\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n goto done;\n }\n int64_t ttl = x;\n p += n;\n /////////////////////\n // flags\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > UINT32_MAX) {\n goto done;\n }\n uint32_t flags = x;\n p += n;\n /////////////////////\n // cas\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0) {\n goto done;\n }\n uint64_t cas = x;\n p += n;\n if (ttl > 0) {\n int64_t unixexpires = int64_add_clamp(unixtime, ttl);\n if (unixexpires < unixnow) {\n // already expired, skip this entry\n ctx->nexpired++;\n continue;\n }\n ttl = unixexpires-unixnow;\n }\n struct pogocache_store_opts opts = {\n .flags = flags,\n .time = now,\n .ttl = ttl,\n .cas = cas,\n };\n // printf(\"[%.*s]=[%.*s]\\n\", (int)keylen, key, (int)vallen, val);\n int ret = pogocache_store(cache, key, keylen, val, vallen, &opts);\n (void)ret;\n assert(ret == POGOCACHE_INSERTED || ret == POGOCACHE_REPLACED);\n ctx->ninserted++;\n }\n ok = true;\ndone:\n buf_clear(&block->cdata);\n xfree(ddata);\n if (!ok) {\n printf(\". 
bad block\\n\");\n }\n return ok;\n}\n\nstatic void *thload(void *arg) {\n struct loadctx *ctx = arg;\n pthread_mutex_lock(ctx->lock);\n while (1) {\n if (*ctx->failure) {\n break;\n }\n if (*ctx->nblocks > 0) {\n // Take a block for processing\n struct cblock block = ctx->blocks[(*ctx->nblocks)-1];\n (*ctx->nblocks)--;\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n ctx->ok = load_block(&block, ctx);\n pthread_mutex_lock(ctx->lock);\n if (!ctx->ok) {\n *ctx->failure = true;\n break;\n }\n // next block\n continue;\n }\n if (*ctx->donereading) {\n break;\n }\n pthread_cond_wait(ctx->cond, ctx->lock);\n }\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n if (!ctx->ok) {\n ctx->errnum = errno;\n }\n return 0;\n}\n\n// load data into cache from path\nint load(const char *path, bool fast, struct load_stats *stats) {\n // Use a single stream reader. Handing off blocks to threads.\n struct load_stats sstats;\n if (!stats) {\n stats = &sstats;\n }\n memset(stats, 0, sizeof(struct load_stats));\n\n int fd = open(path, O_RDONLY);\n if (fd == -1) {\n return -1;\n }\n\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n bool donereading = false;\n bool failure = false;\n\n int nprocs = fast ? 
sys_nprocs() : 1;\n struct loadctx *ctxs = xmalloc(nprocs*sizeof(struct loadctx));\n memset(ctxs, 0, nprocs*sizeof(struct loadctx));\n int nblocks = 0;\n struct cblock *blocks = xmalloc(sizeof(struct cblock)*nprocs);\n memset(blocks, 0, sizeof(struct cblock)*nprocs);\n int therrnum = 0;\n bool ok = true;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n ctx->lock = &lock;\n ctx->cond = &cond;\n ctx->donereading = &donereading;\n ctx->nblocks = &nblocks;\n ctx->failure = &failure;\n ctx->blocks = blocks;\n atomic_init(&ctx->ok, true);\n if (pthread_create(&ctx->th, 0, thload, ctx) == -1) {\n ctx->th = 0;\n ok = false;\n if (therrnum == 0) {\n therrnum = errno;\n }\n }\n }\n if (!ok) {\n // there was an error creating a thread. \n // At this point there may be some orphaned threads waiting on \n // a condition variable. \n goto shutdown_threads;\n }\n\n // Read the blocks from file, one at a time, handing putting blocks into\n // the 'blocks' queue. The running threads will pick these up and \n // process them in no specific order.\n struct buf cdata = { 0 };\n bool shortread = false;\n while (ok) {\n uint8_t head[16];\n ssize_t size = read(fd, head, 16);\n if (size <= 0) {\n if (size == -1) {\n ok = false;\n }\n break;\n }\n if (size < 16) {\n printf(\". bad head size\\n\");\n ok = false;\n break;\n }\n if (memcmp(head, \"POGO\", 4) != 0) {\n printf(\". missing 'POGO'\\n\");\n ok = false;\n break;\n }\n uint32_t crc;\n memcpy(&crc, head+4, 4);\n size_t dlen = read_u32(head+8);\n size_t clen = read_u32(head+12);\n buf_ensure(&cdata, clen);\n bool okread = true;\n size_t total = 0;\n while (total < clen) {\n ssize_t rlen = read(fd, cdata.data+total, clen-total);\n if (rlen <= 0) {\n shortread = true;\n okread = false;\n break;\n }\n total += rlen;\n }\n if (!okread) {\n if (shortread) {\n printf(\". 
shortread\\n\");\n }\n ok = false;\n break;\n }\n cdata.len = clen;\n stats->csize += clen;\n stats->dsize += dlen;\n uint32_t crc2 = crc32(cdata.data, clen);\n if (crc2 != crc) {\n printf(\". bad crc\\n\");\n ok = false;\n goto bdone;\n }\n // We have a good block. Push it into the queue\n pthread_mutex_lock(&lock);\n while (1) {\n if (failure) {\n // A major error occured, stop reading now\n ok = false;\n break;\n }\n if (nblocks == nprocs) {\n // Queue is currently filled up.\n // Wait and try again.\n pthread_cond_wait(&cond, &lock);\n continue;\n }\n // Add block to queue\n blocks[nblocks++] = (struct cblock){ \n .cdata = cdata,\n .dlen = dlen,\n };\n memset(&cdata, 0, sizeof(struct buf));\n pthread_cond_broadcast(&cond);\n break;\n }\n pthread_mutex_unlock(&lock);\n }\nbdone:\n buf_clear(&cdata);\n\n\nshutdown_threads:\n // Stop all threads\n pthread_mutex_lock(&lock);\n donereading = true;\n pthread_mutex_unlock(&lock);\n pthread_cond_broadcast(&cond);\n\n // Wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n stats->nexpired += ctx->nexpired;\n stats->ninserted += ctx->ninserted;\n }\n }\n\n // Get the current error, if any\n errno = 0;\n ok = ok && !failure;\n if (!ok) {\n errno = therrnum;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n if (!ctx->ok) {\n errno = ctx->errnum;\n break;\n }\n }\n }\n }\n\n // Free all resources.\n for (int i = 0; i < nblocks; i++) {\n buf_clear(&blocks[i].cdata);\n }\n xfree(blocks);\n xfree(ctxs);\n close(fd);\n return ok ? 
0 : -1;\n}\n\n// removes all work files and checks that the current directory is valid.\nbool cleanwork(const char *persist) {\n if (*persist == '\\0') {\n return false;\n }\n bool ok = false;\n char *path = xmalloc(strlen(persist)+1);\n strcpy(path, persist);\n char *dirpath = dirname(path);\n DIR *dir = opendir(dirpath);\n if (!dir) {\n perror(\"# opendir\");\n goto done;\n }\n struct dirent *entry;\n while ((entry = readdir(dir))) {\n if (entry->d_type != DT_REG) {\n continue;\n }\n const char *ext = \".pogocache.work\";\n if (strlen(entry->d_name) < strlen(ext) ||\n strcmp(entry->d_name+strlen(entry->d_name)-strlen(ext), ext) != 0)\n {\n continue;\n }\n size_t filepathcap = strlen(dirpath)+1+strlen(entry->d_name)+1;\n char *filepath = xmalloc(filepathcap);\n snprintf(filepath, filepathcap, \"%s/%s\", dirpath, entry->d_name);\n if (unlink(filepath) == 0) {\n printf(\"# deleted work file %s\\n\", filepath);\n } else {\n perror(\"# unlink\");\n }\n xfree(filepath);\n }\n ok = true;\ndone:\n if (dir) {\n closedir(dir);\n }\n xfree(path);\n return ok;\n}\n"], ["/pogocache/src/sys.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit sys.c provides various system-level functions.\n#if __linux__\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __APPLE__\n#include \n#include \n#endif\n#include \"sys.h\"\n\nint sys_nprocs(void) {\n static atomic_int nprocsa = 0;\n int nprocs = atomic_load_explicit(&nprocsa, __ATOMIC_RELAXED);\n if (nprocs > 0) {\n return nprocs;\n }\n int logical = sysconf(_SC_NPROCESSORS_CONF);\n logical = logical < 1 ? 1 : logical;\n int physical = logical;\n int affinity = physical;\n#ifdef __linux__\n affinity = 0;\n cpu_set_t mask;\n CPU_ZERO(&mask);\n if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {\n perror(\"sched_getaffinity\");\n return 1;\n }\n for (int i = 0; i < CPU_SETSIZE; i++) {\n if (CPU_ISSET(i, &mask)) {\n affinity++;\n }\n }\n double hyper = ceil((double)logical / (double)physical);\n hyper = hyper < 1 ? 1 : hyper;\n affinity /= hyper;\n#endif\n nprocs = affinity;\n nprocs = nprocs < 1 ? 
1 : nprocs;\n atomic_store_explicit(&nprocsa, nprocs, __ATOMIC_RELAXED);\n return nprocs;\n}\n\n#ifndef __linux__\n#include \n#endif\n\nsize_t sys_memory(void) {\n size_t sysmem = 0;\n#ifdef __linux__\n FILE *f = fopen(\"/proc/meminfo\", \"rb\");\n if (f) {\n char buf[4096];\n size_t n = fread(buf, 1, sizeof(buf)-1, f);\n buf[n] = '\\0';\n char *s = 0;\n char *e = 0;\n s = strstr(buf, \"MemTotal\");\n if (s) s = strstr(s, \": \");\n if (s) e = strstr(s, \"\\n\");\n if (e) {\n *e = '\\0';\n s += 2;\n while (isspace(*s)) s++;\n if (strstr(s, \" kB\")) {\n s[strstr(s, \" kB\")-s] = '\\0';\n }\n errno = 0;\n char *end;\n int64_t isysmem = strtoll(s, &end, 10);\n assert(errno == 0 && isysmem > 0);\n isysmem *= 1024;\n sysmem = isysmem;\n }\n fclose(f);\n }\n#else\n size_t memsize = 0;\n size_t len = sizeof(memsize);\n if (sysctlbyname(\"hw.memsize\", &memsize, &len, 0, 0) == 0) {\n sysmem = memsize;\n }\n#endif\n if (sysmem == 0) {\n fprintf(stderr, \"# could not detect total system memory, bailing\\n\");\n exit(1);\n }\n return sysmem;\n}\n\nuint64_t sys_seed(void) {\n #define NSEEDCAP 64\n static __thread int nseeds = 0;\n static __thread uint64_t seeds[NSEEDCAP];\n if (nseeds == 0) {\n // Generate a group of new seeds\n FILE *f = fopen(\"/dev/urandom\", \"rb+\");\n if (!f) {\n perror(\"# /dev/urandom\");\n exit(1);\n }\n size_t n = fread(seeds, 8, NSEEDCAP, f);\n (void)n;\n assert(n == NSEEDCAP);\n fclose(f);\n nseeds = NSEEDCAP;\n }\n return seeds[--nseeds];\n}\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// Return monotonic nanoseconds of the CPU clock.\nint64_t sys_now(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// Return unix timestamp in nanoseconds\nint64_t 
sys_unixnow(void) {\n struct timespec now = { 0 };\n clock_gettime(CLOCK_REALTIME, &now);\n return nanotime(&now);\n}\n\n#ifdef __APPLE__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n task_basic_info_data_t taskInfo;\n mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;\n kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,\n (task_info_t)&taskInfo, &infoCount);\n if (kr != KERN_SUCCESS) {\n fprintf(stderr, \"# task_info: %s\\n\", mach_error_string(kr));\n abort();\n }\n info->virt = taskInfo.virtual_size;\n info->rss = taskInfo.resident_size;\n}\n#elif __linux__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n FILE *f = fopen(\"/proc/self/statm\", \"r\");\n if (!f) {\n perror(\"# open /proc/self/statm\");\n abort();\n }\n unsigned long vm_pages, rss_pages;\n long x = fscanf(f, \"%lu %lu\", &vm_pages, &rss_pages);\n fclose(f);\n if (x != 2) {\n perror(\"# read /proc/self/statm\");\n abort();\n }\n\n // Get the system page size (in bytes)\n size_t page_size = sysconf(_SC_PAGESIZE);\n assert(page_size > 0);\n\n // Convert pages to bytes\n info->virt = vm_pages * page_size;\n info->rss = rss_pages * page_size;\n}\n#endif\n\n#include \n\nconst char *sys_arch(void) {\n static __thread bool got = false;\n static __thread char arch[1024] = \"unknown/error\";\n if (!got) {\n struct utsname unameData;\n if (uname(&unameData) == 0) {\n snprintf(arch, sizeof(arch), \"%s/%s\", unameData.sysname, \n unameData.machine);\n char *p = arch;\n while (*p) {\n *p = tolower(*p);\n p++;\n }\n got = true;\n }\n }\n return arch;\n}\n\nvoid sys_genuseid(char useid[16]) {\n const uint8_t chs[] = \n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789\";\n uint64_t a = sys_seed();\n uint64_t b = sys_seed();\n uint8_t bytes[16];\n memcpy(bytes, &a, 8);\n memcpy(bytes+8, &b, 8);\n for (int i = 0; i < 16; i++) {\n bytes[i] = chs[bytes[i]%62];\n }\n memcpy(useid, bytes, 16);\n}\n\n// Returns a unique thread id for the current thread.\n// This is 
an artificial generated value that is always distinct. \nuint64_t sys_threadid(void) {\n static atomic_int_fast64_t next = 0;\n static __thread uint64_t id = 0;\n if (id == 0) {\n id = atomic_fetch_add_explicit(&next, 1, __ATOMIC_RELEASE);\n }\n return id;\n}\n"], ["/pogocache/src/conn.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit conn.c are interface functions for a network connection.\n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"args.h\"\n#include \"cmds.h\"\n#include \"xmalloc.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"helppage.h\"\n\n#define MAXPACKETSZ 1048576 // Maximum read packet size\n\nstruct conn {\n struct net_conn *conn5; // originating connection\n struct buf packet; // current incoming packet\n int proto; // connection protocol (memcache, http, etc)\n bool auth; // user is authorized\n bool noreply; // only for memcache\n bool keepalive; // only for http\n int httpvers; // only for http\n struct args args; // command args, if any\n struct pg *pg; // postgres context, only if proto is postgres\n};\n\nbool conn_istls(struct conn *conn) {\n return net_conn_istls(conn->conn5);\n}\n\nint conn_proto(struct conn *conn) {\n return conn->proto;\n}\n\nbool conn_auth(struct conn *conn) {\n return conn->auth;\n}\n\nvoid conn_setauth(struct conn *conn, bool ok) {\n conn->auth = ok;\n}\n\nbool conn_isclosed(struct conn *conn) {\n return net_conn_isclosed(conn->conn5);\n}\n\nvoid conn_close(struct conn *conn) {\n net_conn_close(conn->conn5);\n}\n\nvoid evopened(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = xmalloc(sizeof(struct conn));\n memset(conn, 0, 
sizeof(struct conn));\n conn->conn5 = conn5;\n net_conn_setudata(conn5, conn);\n}\n\nvoid evclosed(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n buf_clear(&conn->packet);\n args_free(&conn->args);\n pg_free(conn->pg);\n xfree(conn);\n}\n\n// network data handler\n// The evlen may be zero when returning from a bgwork routine, while having\n// existing data in the connection packet.\nvoid evdata(struct net_conn *conn5, const void *evdata, size_t evlen,\n void *udata)\n{\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n if (conn_isclosed(conn)) {\n goto close;\n }\n#ifdef DATASETOK\n if (evlen == 14 && memcmp(evdata, \"*1\\r\\n$4\\r\\nPING\\r\\n\", 14) == 0) {\n conn_write_raw(conn, \"+PONG\\r\\n\", 7);\n } else if (evlen == 13 && memcmp(evdata, \"*2\\r\\n$3\\r\\nGET\\r\\n\", 13) == 0) {\n conn_write_raw(conn, \"$1\\r\\nx\\r\\n\", 7);\n } else {\n conn_write_raw(conn, \"+OK\\r\\n\", 5);\n }\n return;\n#endif\n char *data;\n size_t len;\n bool copied;\n if (conn->packet.len == 0) {\n data = (char*)evdata;\n len = evlen;\n copied = false;\n } else {\n buf_append(&conn->packet, evdata, evlen);\n len = conn->packet.len;\n data = conn->packet.data;\n copied = true;\n }\n while (len > 0 && !conn_isclosed(conn)) {\n // Parse the command\n ssize_t n = parse_command(data, len, &conn->args, &conn->proto, \n &conn->noreply, &conn->httpvers, &conn->keepalive, &conn->pg);\n if (n == 0) {\n // Not enough data provided yet.\n break;\n } else if (n == -1) {\n // Protocol error occurred.\n conn_write_error(conn, parse_lasterror());\n if (conn->proto == PROTO_MEMCACHE) {\n // Memcache doesn't close, but we'll need to know the last\n // character position to continue and revert back to it so\n // we can attempt to continue to the next command.\n n = parse_lastmc_n();\n } else {\n // Close on protocol error\n conn_close(conn);\n break;\n }\n } else if (conn->args.len == 0) {\n // There were no command arguments 
provided.\n if (conn->proto == PROTO_POSTGRES) {\n if (!pg_respond(conn, conn->pg)) {\n // close connection\n conn_close(conn);\n break;\n }\n } else if (conn->proto == PROTO_MEMCACHE) {\n // Memcache simply returns a nondescript error.\n conn_write_error(conn, \"ERROR\");\n } else if (conn->proto == PROTO_HTTP) {\n // HTTP must always return arguments.\n assert(!\"PROTO_HTTP\");\n } else if (conn->proto == PROTO_RESP) {\n // RESP just continues until it gets args.\n }\n } else if (conn->proto == PROTO_POSTGRES && !conn->pg->ready) {\n // This should not have been reached. The client did not \n // send a startup message\n conn_close(conn);\n break;\n } else if (conn->proto != PROTO_POSTGRES || \n pg_precommand(conn, &conn->args, conn->pg))\n {\n evcommand(conn, &conn->args);\n }\n len -= n;\n data += n;\n if (net_conn_bgworking(conn->conn5)) {\n // BGWORK(0)\n break;\n }\n if (conn->proto == PROTO_HTTP) {\n conn_close(conn);\n }\n }\n if (conn_isclosed(conn)) {\n goto close;\n }\n if (len == 0) {\n if (copied) {\n if (conn->packet.cap > MAXPACKETSZ) {\n buf_clear(&conn->packet);\n }\n conn->packet.len = 0;\n }\n } else {\n if (copied) {\n memmove(conn->packet.data, data, len);\n conn->packet.len = len;\n } else {\n buf_append(&conn->packet, data, len);\n }\n }\n return;\nclose:\n conn_close(conn);\n}\n\nstruct bgworkctx {\n struct conn *conn;\n void *udata;\n void(*work)(void *udata);\n void(*done)(struct conn *conn, void *udata);\n};\n\nstatic void work5(void *udata) {\n struct bgworkctx *ctx = udata;\n ctx->work(ctx->udata);\n}\n\nstatic void done5(struct net_conn *conn, void *udata) {\n (void)conn;\n struct bgworkctx *ctx = udata;\n ctx->done(ctx->conn, ctx->udata);\n xfree(ctx);\n}\n\n// conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool conn_bgwork(struct conn *conn, void(*work)(void *udata), \n void(*done)(struct conn *conn, void *udata), 
void *udata)\n{\n struct bgworkctx *ctx = xmalloc(sizeof(struct bgworkctx));\n ctx->conn = conn;\n ctx->udata = udata;\n ctx->work = work;\n ctx->done = done;\n if (!net_conn_bgwork(conn->conn5, work5, done5, ctx)) {\n xfree(ctx);\n return false;\n }\n return true;\n}\n\nstatic void writeln(struct conn *conn, char ch, const void *data, ssize_t len) {\n if (len < 0) {\n len = strlen(data);\n }\n net_conn_out_ensure(conn->conn5, 3+len);\n net_conn_out_write_byte_nocheck(conn->conn5, ch);\n size_t mark = net_conn_out_len(conn->conn5);\n net_conn_out_write_nocheck(conn->conn5, data, len);\n net_conn_out_write_byte_nocheck(conn->conn5, '\\r');\n net_conn_out_write_byte_nocheck(conn->conn5, '\\n');\n uint8_t *out = (uint8_t*)net_conn_out(conn->conn5);\n for (ssize_t i = mark; i < len; i++) {\n if (out[i] < ' ') {\n out[i] = ' ';\n }\n }\n}\n\nstatic void write_error(struct conn *conn, const char *err, bool server) {\n if (conn->proto == PROTO_MEMCACHE) {\n if (strstr(err, \"ERR \") == err) {\n // convert to client or server error\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n if (server) {\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err+4); \n } else {\n snprintf(err2, err2sz, \"CLIENT_ERROR %s\\r\\n\", err+4); \n }\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n if (server) {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else if (strstr(err, \"CLIENT_ERROR \") == err || \n strstr(err, \"CLIENT_ERROR \") == err)\n {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"%s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n conn_write_raw(conn, \"ERROR\\r\\n\", 7);\n }\n }\n } else if (conn->proto == PROTO_POSTGRES) {\n if (strstr(err, \"ERR \") == err) {\n err = err+4;\n }\n pg_write_error(conn, err);\n 
pg_write_ready(conn, 'I');\n } else if (conn->proto == PROTO_HTTP) {\n if (strstr(err, \"ERR \") == err) {\n err += 4;\n }\n if (strcmp(err, \"Show Help HTML\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_HTML, -1);\n } else if (strcmp(err, \"Show Help TEXT\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_TEXT, -1);\n } else if (strcmp(err, \"Method Not Allowed\") == 0) {\n conn_write_http(conn, 405, \"Method Not Allowed\", \n \"Method Not Allowed\\r\\n\", -1);\n } else if (strcmp(err, \"Unauthorized\") == 0) {\n conn_write_http(conn, 401, \"Unauthorized\", \n \"Unauthorized\\r\\n\", -1);\n } else if (strcmp(err, \"Bad Request\") == 0) {\n conn_write_http(conn, 400, \"Bad Request\", \n \"Bad Request\\r\\n\", -1);\n } else {\n size_t sz = strlen(err)+32;\n char *err2 = xmalloc(sz);\n snprintf(err2, sz, \"ERR %s\\r\\n\", err);\n conn_write_http(conn, 500, \"Internal Server Error\", \n err2, -1);\n xfree(err2);\n }\n } else {\n writeln(conn, '-', err, -1);\n }\n}\n\nvoid conn_write_error(struct conn *conn, const char *err) {\n bool server = false;\n if (strcmp(err, ERR_OUT_OF_MEMORY) == 0) {\n server = true;\n }\n write_error(conn, err, server);\n}\n\nvoid conn_write_string(struct conn *conn, const char *cstr) {\n writeln(conn, '+', cstr, -1);\n}\n\nvoid conn_write_null(struct conn *conn) {\n net_conn_out_write(conn->conn5, \"$-1\\r\\n\", 5);\n}\n\nvoid resp_write_bulk(struct buf *buf, const void *data, size_t len) {\n uint8_t str[32];\n size_t n = u64toa(len, str);\n buf_append_byte(buf, '$');\n buf_append(buf, str, n);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n buf_append(buf, data, len);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n}\n\nvoid conn_write_bulk(struct conn *conn, const void *data, size_t len) {\n net_conn_out_ensure(conn->conn5, 32+len);\n size_t olen = net_conn_out_len(conn->conn5);\n uint8_t *base = (uint8_t*)net_conn_out(conn->conn5)+olen;\n uint8_t *p = base;\n *(p++) = '$';\n p += 
u64toa(len, p);\n *(p++) = '\\r';\n *(p++) = '\\n';\n memcpy(p, data, len);\n p += len;\n *(p++) = '\\r';\n *(p++) = '\\n';\n net_conn_out_setlen(conn->conn5, olen + (p-base));\n}\n\nvoid conn_write_raw(struct conn *conn, const void *data, size_t len) {\n net_conn_out_write(conn->conn5, data, len);\n}\n\nvoid conn_write_http(struct conn *conn, int code, const char *status,\n const void *body, ssize_t bodylen)\n{\n if (bodylen == -1) {\n if (!body) {\n body = status;\n }\n bodylen = strlen(body);\n }\n char resp[512];\n size_t n = snprintf(resp, sizeof(resp), \n \"HTTP/1.1 %d %s\\r\\n\"\n \"Content-Length: %zu\\r\\n\"\n \"Connection: Close\\r\\n\"\n \"\\r\\n\",\n code, status, bodylen);\n conn_write_raw(conn, resp, n);\n if (bodylen > 0) {\n conn_write_raw(conn, body, bodylen);\n }\n}\n\nvoid conn_write_array(struct conn *conn, size_t count) {\n uint8_t str[24];\n size_t n = u64toa(count, str);\n writeln(conn, '*', str, n);\n}\n\nvoid conn_write_uint(struct conn *conn, uint64_t value) {\n uint8_t buf[24];\n size_t n = u64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, '+', buf, n); // the '+' is needed for unsigned int\n }\n}\n\nvoid conn_write_int(struct conn *conn, int64_t value) {\n uint8_t buf[24];\n size_t n = i64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, ':', buf, n);\n }\n}\n\nvoid conn_write_raw_cstr(struct conn *conn, const char *cstr) {\n conn_write_raw(conn, cstr, strlen(cstr));\n}\n\nvoid conn_write_bulk_cstr(struct conn *conn, const char *cstr) {\n conn_write_bulk(conn, cstr, strlen(cstr));\n}\n\nvoid stat_cmd_get_incr(struct conn *conn) {\n net_stat_cmd_get_incr(conn->conn5);\n}\n\nvoid stat_cmd_set_incr(struct conn *conn) {\n net_stat_cmd_set_incr(conn->conn5);\n}\n\nvoid stat_get_hits_incr(struct conn *conn) {\n net_stat_get_hits_incr(conn->conn5);\n}\n\nvoid stat_get_misses_incr(struct conn *conn) {\n 
net_stat_get_misses_incr(conn->conn5);\n}\n\nbool pg_execute(struct conn *conn) {\n return conn->pg->execute;\n}\n\nstruct pg *conn_pg(struct conn *conn) {\n return conn->pg;\n}\n"], ["/pogocache/src/memcache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit memcache.c provides the parser for the Memcache wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\nstatic __thread size_t mc_n = 0;\n\nsize_t parse_lastmc_n(void) {\n return mc_n;\n}\n\nbool mc_valid_key(struct args *args, int i) {\n const uint8_t *key = (uint8_t*)args->bufs[i].data;\n size_t len = args->bufs[i].len;\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] == 0x7F) {\n return false;\n }\n }\n return true;\n}\n\nenum mc_cmd { MC_UNKNOWN, \n // writers (optional reply)\n MC_SET, MC_ADD, MC_REPLACE, MC_APPEND, MC_PREPEND, MC_CAS, // storage\n MC_INCR, MC_DECR, // increment/decrement\n MC_FLUSH_ALL, MC_DELETE, // deletion\n MC_TOUCH, // touch\n MC_VERBOSITY, // logging\n // readers (always replys)\n MC_GET, MC_GETS, // retreival\n MC_GAT, MC_GATS, // get and touch\n MC_VERSION, MC_STATS, // information\n MC_QUIT, // client\n};\n\nstatic bool is_mc_store_cmd(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_CAS;\n}\n\nstatic bool is_mc_noreplyable(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_VERBOSITY;\n}\n\nstatic ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n 
char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n // if (wn > 0) {\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}\n\nssize_t parse_memcache(const char *data, size_t len, struct args *args, \n bool *noreply)\n{\n ssize_t n = parse_memcache_telnet(data, len, args);\n if (n <= 0 || args->len == 0) {\n return n;\n }\n // args_print(args);\n mc_n = n;\n enum mc_cmd cmd;\n struct args args2 = { 0 };\n *noreply = false;\n // check for common get-2\n if (args->len == 2 && arg_const_eq(args, 0, \"get\")) {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args->bufs[0].data = \"mget\";\n args->bufs[0].len = 4;\n return n;\n }\n // Check for common set-5 (allows for expiry)\n if (args->len == 5 && arg_const_eq(args, 0, \"set\")) {\n if (args->bufs[2].len == 1 && args->bufs[2].data[0] == '0') {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool expset = false;\n int64_t x;\n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n expset = true;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n // replace the \"flags\" with a value\n args->bufs[2].len = value_len;\n args->bufs[2].data = (void*)value;\n args->len = 3;\n if (expset) {\n // add the \"ex \" to last 
two arguments\n args->bufs[4] = args->bufs[3];\n args->bufs[3].data = \"ex\";\n args->bufs[3].len = 2;\n args->len = 5;\n }\n return n;\n } else {\n // flags was set, use plus branch\n cmd = MC_SET;\n goto set_plus;\n }\n }\n // Otherwise use lookup command table. This could be optimized into a\n // switch table or hash table. See cmds.c for hash table example.\n cmd =\n arg_const_eq(args, 0, \"set\") ? MC_SET : // XY\n arg_const_eq(args, 0, \"add\") ? MC_ADD : // XY\n arg_const_eq(args, 0, \"cas\") ? MC_CAS : // XY\n arg_const_eq(args, 0, \"replace\") ? MC_REPLACE : // XY\n arg_const_eq(args, 0, \"get\") ? MC_GET : // XY\n arg_const_eq(args, 0, \"delete\") ? MC_DELETE : // XY\n arg_const_eq(args, 0, \"append\") ? MC_APPEND : // XY\n arg_const_eq(args, 0, \"prepend\") ? MC_PREPEND : // XY\n arg_const_eq(args, 0, \"gets\") ? MC_GETS : // XY\n arg_const_eq(args, 0, \"incr\") ? MC_INCR : // XY\n arg_const_eq(args, 0, \"decr\") ? MC_DECR: // XY\n arg_const_eq(args, 0, \"touch\") ? MC_TOUCH : // X\n arg_const_eq(args, 0, \"gat\") ? MC_GAT : // X\n arg_const_eq(args, 0, \"gats\") ? MC_GATS : // X\n arg_const_eq(args, 0, \"flush_all\") ? MC_FLUSH_ALL : // X\n arg_const_eq(args, 0, \"stats\") ? MC_STATS : // X\n arg_const_eq(args, 0, \"version\") ? MC_VERSION : // X\n arg_const_eq(args, 0, \"quit\") ? MC_QUIT : // XY\n arg_const_eq(args, 0, \"verbosity\") ? 
MC_VERBOSITY : // X\n MC_UNKNOWN;\n if (cmd == MC_UNKNOWN) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (is_mc_noreplyable(cmd)) {\n if (arg_const_eq(args, args->len-1, \"noreply\")) {\n *noreply = true;\n buf_clear(&args->bufs[args->len-1]);\n args->len--;\n }\n }\n if (is_mc_store_cmd(cmd)) {\n // Store commands include 'set', 'add', 'replace', 'append', 'prepend',\n // and 'cas'.\n if ((cmd == MC_CAS && args->len != 6) && \n (cmd != MC_CAS && args->len != 5))\n {\n parse_seterror(\"ERROR\");\n return -1;\n }\n set_plus:\n // check all values before continuing\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n int64_t x;\n if (!argi64(args, 2, &x) || x < 0) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool hascas = false;\n char cas[24] = \"0\";\n if (cmd == MC_CAS) {\n hascas = true;\n uint64_t y;\n if (!argu64(args, 5, &y)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n assert(args->bufs[5].len < sizeof(cas));\n memcpy(cas, args->bufs[5].data, args->bufs[5].len);\n cas[args->bufs[5].len] = '\\0';\n buf_clear(&args->bufs[5]);\n args->len--;\n }\n\n // Storage commands must read a value that follows the first line.\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n\n // Reconstruct the command into a RESP format. 
\n bool is_append_prepend = false;\n switch (cmd) {\n case MC_APPEND:\n args_append(&args2, \"append\", 6, true);\n is_append_prepend = true;\n break;\n case MC_PREPEND:\n args_append(&args2, \"prepend\", 7, true);\n is_append_prepend = true;\n break;\n default:\n args_append(&args2, \"set\", 3, true);\n break;\n }\n // Move key arg to new args\n take_and_append_arg(1);\n // Add value arg\n args_append(&args2, value, value_len, true);\n if (!is_append_prepend) {\n if (!(args->bufs[2].len == 1 && args->bufs[2].data[0] == '0')) {\n args_append(&args2, \"flags\", 5, true);\n take_and_append_arg(2);\n }\n \n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n args_append(&args2, \"ex\", 2, true);\n take_and_append_arg(3);\n }\n if (cmd == MC_ADD) {\n args_append(&args2, \"nx\", 2, true);\n } else if (cmd == MC_REPLACE) {\n args_append(&args2, \"xx\", 2, true);\n }\n if (hascas) {\n args_append(&args2, \"cas\", 3, true);\n args_append(&args2, cas, strlen(cas), false);\n }\n }\n } else if (cmd == MC_GET) {\n // Convert 'get * into 'MGET *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mget\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_DELETE) {\n // Convert 'delete ' into 'DEL '\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (args->len > 2) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"del\", 3, true);\n take_and_append_arg(1);\n } else if (cmd == MC_GETS) {\n // Convert 'gets * into 'MGETS *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; 
i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mgets\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GAT) {\n // Convert 'gat * into 'gat *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gat\", 3, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GATS) {\n // Convert 'gats * into 'gats *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gats\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_STATS) {\n args_append(&args2, \"stats\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_INCR) {\n // Convert 'incr into 'uincrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"uincrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_DECR) {\n // Convert 'decr into 'udecrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n 
return -1;\n }\n args_append(&args2, \"udecrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_TOUCH) {\n // Convert 'touch ' into 'expire '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"expire\", 6, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_FLUSH_ALL) {\n // Convert 'flush_all [delay]' into 'FLUSHALL [DELAY seconds]'\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"flushall\", 8, true);\n if (args->len == 2) {\n args_append(&args2, \"delay\", 5, true);\n take_and_append_arg(1);\n }\n } else if (cmd == MC_QUIT) {\n args_append(&args2, \"quit\", 4, true);\n *noreply = true;\n } else if (cmd == MC_VERSION) {\n args_append(&args2, \"version\", 7, true);\n *noreply = false;\n } else if (cmd == MC_VERBOSITY) {\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"verbosity\", 7, true);\n take_and_append_arg(1);\n } else {\n return -1;\n }\n args_free(args);\n *args = args2;\n return n;\n}\n"], ["/pogocache/src/resp.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit resp.c provides the parser for the RESP wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args) {\n char *err = NULL;\n struct buf arg = { 0 };\n bool inarg = false;\n char quote = '\\0';\n for (size_t i = 0; i < len; i++) {\n char ch = bytes[i];\n if (inarg) {\n if (quote) {\n if (ch == '\\n') {\n goto fail_quotes;\n }\n if (ch == quote) { \n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n inarg = false;\n if (ch == '\\n') {\n i--;\n continue;\n }\n if (!isspace(ch)) {\n goto fail_quotes;\n }\n continue;\n } else if (ch == '\\\\') {\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n switch (ch) {\n case 'n': ch = '\\n'; break;\n case 'r': ch = '\\r'; break;\n case 't': ch = '\\t'; break;\n }\n }\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n } else {\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else if (isspace(ch)) {\n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n if (ch == '\\n') {\n break;\n }\n inarg = false;\n } else {\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n } else {\n if (ch == '\\n') {\n 
buf_clear(&arg);\n return i+1;\n }\n if (isspace(ch)) {\n continue;\n }\n inarg = true;\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else {\n quote = 0;\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n }\n buf_clear(&arg);\n return 0;\nfail_quotes:\n if (!err) err = \"ERR Protocol error: unbalanced quotes in request\";\nfail_nargs:\n if (!err) err = \"ERR Protocol error: invalid multibulk length\";\nfail_argsz:\n if (!err) err = \"ERR Protocol error: invalid bulk length\";\n/* fail: */\n if (err) {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \"%s\", err);\n }\n buf_clear(&arg);\n return -1;\n}\n\nstatic int64_t read_num(const char *data, size_t len, int64_t min, int64_t max,\n bool *ok)\n{\n errno = 0;\n char *end;\n int64_t x = strtoll(data, &end, 10);\n *ok = errno == 0 && (size_t)(end-data) == len && x >= min && x <= max;\n return x;\n}\n\n#define read_resp_num(var, min, max, errmsg) { \\\n char *p = memchr(bytes, '\\r', end-bytes); \\\n if (!p) { \\\n if (end-bytes > 32) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n return 0; \\\n } \\\n if (p+1 == end) { \\\n return 0; \\\n } \\\n if (*(p+1) != '\\n') { \\\n return -1; \\\n } \\\n bool ok; \\\n var = read_num(bytes, p-bytes, min, max, &ok); \\\n if (!ok) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n bytes = p+2; \\\n}\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp(const char *bytes, size_t len, struct args *args) {\n const char *start = bytes;\n const char *end = bytes+len;\n if (bytes == end) {\n return 0;\n }\n if (*(bytes++) != '*') {\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nargs;\n read_resp_num(nargs, LONG_MIN, MAXARGS, \"invalid multibulk length\");\n for (int j = 0; j < nargs; j++) {\n if (bytes == end) {\n 
return 0;\n }\n if (*(bytes++) != '$') {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \n \"ERR Protocol error: expected '$', got '%c'\", *(bytes-1));\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nbytes;\n read_resp_num(nbytes, 0, MAXARGSZ, \"invalid bulk length\");\n if (nbytes+2 > end-bytes) {\n return 0;\n }\n args_append(args, bytes, nbytes, true);\n bytes += nbytes+2;\n }\n return bytes-start;\n}\n\n"], ["/pogocache/src/http.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit http.c provides the parser for the HTTP wire protocol.\n#define _GNU_SOURCE \n#include \n#include \n#include \n#include \n#include \"stats.h\"\n#include \"util.h\"\n#include \"parse.h\"\n\nextern const bool useauth;\nextern const char *auth;\n\nbool http_valid_key(const char *key, size_t len) {\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] >= 0x7F || key[i] == '%' || key[i] == '+' ||\n key[i] == '@' || key[i] == '$' || key[i] == '?' 
|| key[i] == '=') \n {\n return false;\n }\n }\n return true;\n}\n\nssize_t parse_http(const char *data, size_t len, struct args *args, \n int *httpvers, bool *keepalive)\n{\n *keepalive = false;\n *httpvers = 0;\n const char *method = 0;\n size_t methodlen = 0;\n const char *uri = 0;\n size_t urilen = 0;\n int proto = 0;\n const char *hdrname = 0; \n size_t hdrnamelen = 0;\n const char *hdrval = 0;\n size_t hdrvallen = 0;\n size_t bodylen = 0;\n bool nocontentlength = true;\n bool html = false;\n const char *authhdr = 0;\n size_t authhdrlen = 0;\n const char *p = data;\n const char *e = p+len;\n const char *s = p;\n while (p < e) {\n if (*p == ' ') {\n method = s;\n methodlen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == ' ') {\n uri = s;\n urilen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n if (p-s-1 != 8 || !bytes_const_eq(s, 5, \"HTTP/\") || \n s[5] < '0' || s[5] > '9' || s[6] != '.' 
|| \n s[7] < '0' || s[7] > '9')\n {\n goto badproto;\n }\n proto = (s[5]-'0')*10+(s[7]-'0');\n if (proto < 9 || proto >= 30) {\n goto badproto;\n }\n if (proto >= 11) {\n *keepalive = true;\n }\n *httpvers = proto;\n p++;\n goto readhdrs;\n }\n \n p++;\n }\n goto badreq;\nreadhdrs:\n // Parse the headers, pulling the pairs along the way.\n while (p < e) {\n hdrname = p;\n while (p < e) {\n if (*p == ':') {\n hdrnamelen = p-hdrname;\n p++;\n while (p < e && *p == ' ') {\n p++;\n }\n hdrval = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n hdrvallen = p-hdrval-1;\n // printf(\"[%.*s]=[%.*s]\\n\", (int)hdrnamelen, hdrname,\n // (int)hdrvallen, hdrval);\n // We have a new header pair (hdrname, hdrval);\n if (argeq_bytes(hdrname, hdrnamelen, \"content-length\")){\n uint64_t x;\n if (!parse_u64(hdrval, hdrvallen, &x) || \n x > MAXARGSZ)\n {\n stat_store_too_large_incr(0);\n goto badreq;\n }\n bodylen = x;\n nocontentlength = false;\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"connection\"))\n {\n *keepalive = argeq_bytes(hdrval, hdrvallen, \n \"keep-alive\");\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"accept\"))\n {\n if (memmem(hdrval, hdrvallen, \"text/html\", 9) != 0){\n html = true;\n }\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"authorization\"))\n {\n authhdr = hdrval;\n authhdrlen = hdrvallen;\n }\n p++;\n if (p < e && *p == '\\r') {\n p++;\n if (p < e && *p == '\\n') {\n p++;\n } else {\n goto badreq;\n }\n goto readbody;\n }\n break;\n }\n p++;\n }\n break;\n }\n p++;\n }\n }\n return 0;\nreadbody:\n // read the content body\n if ((size_t)(e-p) < bodylen) {\n return 0;\n }\n const char *body = p;\n p = e;\n\n // check\n if (urilen == 0 || uri[0] != '/') {\n goto badreq;\n }\n uri++;\n urilen--;\n const char *ex = 0;\n size_t exlen = 0;\n const char *flags = 0;\n size_t flagslen = 0;\n const char *cas = 0;\n size_t caslen = 0;\n const char *qauth = 0;\n size_t qauthlen = 0;\n bool xx = false;\n bool nx = 
false;\n // Parse the query string, pulling the pairs along the way.\n size_t querylen = 0;\n const char *query = memchr(uri, '?', urilen);\n if (query) {\n querylen = urilen-(query-uri);\n urilen = query-uri;\n query++;\n querylen--;\n const char *qkey;\n size_t qkeylen;\n const char *qval;\n size_t qvallen;\n size_t j = 0;\n size_t k = 0;\n for (size_t i = 0; i < querylen; i++) {\n if (query[i] == '=') {\n k = i;\n i++;\n for (; i < querylen; i++) {\n if (query[i] == '&') {\n break;\n }\n }\n qval = query+k+1;\n qvallen = i-k-1;\n qkeyonly:\n qkey = query+j;\n qkeylen = k-j;\n // We have a new query pair (qkey, qval);\n if (bytes_const_eq(qkey, qkeylen, \"flags\")) {\n flags = qval;\n flagslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"ex\") || \n bytes_const_eq(qkey, qkeylen, \"ttl\"))\n {\n ex = qval;\n exlen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"cas\")) {\n cas = qval;\n caslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"xx\")) {\n xx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"nx\")) {\n nx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"auth\")) {\n qauth = qval;\n qauthlen = qvallen;\n }\n j = i+1;\n } else if (query[i] == '&' || i == querylen-1) {\n qval = 0;\n qvallen = 0;\n if (i == querylen-1) {\n i++;\n }\n k = i;\n goto qkeyonly;\n }\n }\n }\n // The entire HTTP request is complete.\n // Turn request into valid command arguments.\n if (bytes_const_eq(method, methodlen, \"GET\")) {\n if (urilen > 0 && uri[0] == '@') {\n // system command such as @stats or @flushall\n goto badreq;\n } else if (urilen == 0) {\n goto showhelp;\n } else {\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"get\", 3, true);\n args_append(args, uri, urilen, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"PUT\")) {\n if (nocontentlength) {\n // goto badreq;\n }\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n 
args_append(args, \"set\", 3, true);\n args_append(args, uri, urilen, true);\n args_append(args, body, bodylen, true);\n if (cas) {\n args_append(args, \"cas\", 3, true);\n args_append(args, cas, caslen, true);\n }\n if (ex) {\n args_append(args, \"ex\", 2, true);\n args_append(args, ex, exlen, true);\n }\n if (flags) {\n args_append(args, \"flags\", 5, true);\n args_append(args, flags, flagslen, true);\n }\n if (xx) {\n args_append(args, \"xx\", 2, true);\n }\n if (nx) {\n args_append(args, \"nx\", 2, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"DELETE\")) {\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"del\", 3, true);\n args_append(args, uri, urilen, true);\n } else {\n parse_seterror(\"Method Not Allowed\");\n goto badreq;\n }\n\n // Check authorization\n const char *authval = 0;\n size_t authvallen = 0;\n if (qauthlen > 0) {\n authval = qauth;\n authvallen = qauthlen;\n } else if (authhdrlen > 0) {\n if (authhdrlen >= 7 && strncmp(authhdr, \"Bearer \", 7) == 0) {\n authval = authhdr + 7;\n authvallen = authhdrlen - 7;\n } else {\n goto unauthorized;\n }\n }\n if (useauth || authvallen > 0) {\n stat_auth_cmds_incr(0);\n size_t authlen = strlen(auth);\n if (authvallen != authlen || memcmp(auth, authval, authlen) != 0) {\n stat_auth_errors_incr(0);\n goto unauthorized;\n }\n\n }\n return e-data;\nbadreq:\n parse_seterror(\"Bad Request\");\n return -1;\nbadproto:\n parse_seterror(\"Bad Request\");\n return -1;\nbadkey:\n parse_seterror(\"Invalid Key\");\n return -1;\nunauthorized:\n parse_seterror(\"Unauthorized\");\n return -1;\nshowhelp:\n if (html) {\n parse_seterror(\"Show Help HTML\");\n } else {\n parse_seterror(\"Show Help TEXT\");\n }\n return -1;\n}\n"], ["/pogocache/src/args.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit args.c provides functions for managing command arguments\n#include \n#include \n#include \n#include \"args.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n\nconst char *args_at(struct args *args, int idx, size_t *len) {\n *len = args->bufs[idx].len;\n return args->bufs[idx].data;\n}\n\nint args_count(struct args *args) {\n return args->len;\n}\n\nbool args_eq(struct args *args, int index, const char *str) {\n if ((size_t)index >= args->len) {\n return false;\n }\n size_t alen = args->bufs[index].len;\n const char *arg = args->bufs[index].data;\n size_t slen = strlen(str); \n if (alen != slen) {\n return false;\n }\n for (size_t i = 0; i < slen ; i++) {\n if (tolower(str[i]) != tolower(arg[i])) {\n return false;\n }\n }\n return true;\n}\n\nvoid args_append(struct args *args, const char *data, size_t len,\n bool zerocopy)\n{\n#ifdef NOZEROCOPY\n zerocopy = 0;\n#endif\n if (args->len == args->cap) {\n args->cap = args->cap == 0 ? 
4 : args->cap*2;\n args->bufs = xrealloc(args->bufs, args->cap * sizeof(struct buf));\n memset(&args->bufs[args->len], 0, (args->cap-args->len) * \n sizeof(struct buf));\n }\n if (zerocopy) {\n buf_clear(&args->bufs[args->len]);\n args->bufs[args->len].len = len;\n args->bufs[args->len].data = (char*)data;\n } else {\n args->bufs[args->len].len = 0;\n buf_append(&args->bufs[args->len], data, len);\n }\n if (args->len == 0) {\n args->zerocopy = zerocopy;\n } else {\n args->zerocopy = args->zerocopy && zerocopy;\n }\n args->len++;\n}\n\nvoid args_clear(struct args *args) {\n if (!args->zerocopy) {\n for (size_t i = 0 ; i < args->len; i++) {\n buf_clear(&args->bufs[i]);\n }\n }\n args->len = 0;\n}\n\nvoid args_free(struct args *args) {\n args_clear(args);\n xfree(args->bufs);\n}\n\nvoid args_print(struct args *args) {\n printf(\". \");\n for (size_t i = 0; i < args->len; i++) {\n char *buf = args->bufs[i].data;\n int len = args->bufs[i].len;\n printf(\"[\"); \n binprint(buf, len);\n printf(\"] \");\n }\n printf(\"\\n\");\n}\n\n// remove the first item\nvoid args_remove_first(struct args *args) {\n if (args->len > 0) {\n buf_clear(&args->bufs[0]);\n for (size_t i = 1; i < args->len; i++) {\n args->bufs[i-1] = args->bufs[i];\n }\n args->len--;\n }\n}\n"], ["/pogocache/src/parse.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit parse.c provides the entrypoint for parsing all data \n// for incoming client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n\n__thread char parse_lasterr[1024] = \"\";\n\nconst char *parse_lasterror(void) {\n return parse_lasterr;\n}\n\nssize_t parse_resp(const char *bytes, size_t len, struct args *args);\nssize_t parse_memcache(const char *data, size_t len, struct args *args,\n bool *noreply);\nssize_t parse_http(const char *data, size_t len, struct args *args,\n int *httpvers, bool *keepalive);\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args);\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pg);\n\nstatic bool sniff_proto(const char *data, size_t len, int *proto) {\n if (len > 0 && data[0] == '*') {\n *proto = PROTO_RESP;\n return true;\n }\n if (len > 0 && data[0] == '\\0') {\n *proto = PROTO_POSTGRES;\n return true;\n }\n // Parse the first line of text\n size_t n = 0;\n for (size_t i = 0; i < len; i++) {\n if (data[i] == '\\n') {\n n = i+1;\n break;\n }\n }\n // Look for \" HTTP/*.*\\r\\n\" suffix\n if (n >= 11 && memcmp(data+n-11, \" HTTP/\", 5) == 0 && \n data[n-4] == '.' 
&& data[n-2] == '\\r')\n {\n *proto = PROTO_HTTP;\n return true;\n }\n // Trim the prefix, Resp+Telnet and Memcache both allow for spaces between\n // arguments.\n while (*data == ' ') {\n data++;\n n--;\n len--;\n }\n // Treat all uppercase commands as Resp+Telnet\n if (n > 0 && data[0] >= 'A' && data[0] <= 'Z') {\n *proto = PROTO_RESP;\n return true;\n }\n // Look for Memcache commands\n if (n >= 1) {\n *proto = PROTO_MEMCACHE;\n return true;\n }\n // Protocol is unknown\n *proto = 0;\n return false;\n}\n\n// Returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\n// On success, the args and proto will be set to the command arguments and\n// protocol type, respectively.\n//\n// It's required to set proto to 0 for the first command, per client.\n// Then continue to provide the last known proto. \n// This allows for the parser to learn and predict the protocol for ambiguous\n// protocols; like Resp+Telnet, Memcache+Text, HTTP, etc.\n//\n// The noreply param is an output param that is only set when the proto is\n// memcache. The argument is stripped from the args array,\n// but made available to the caller in case it needs to be known.\n//\n// The keepalive param is an output param that is only set when the proto is\n// http. It's used to let the caller know to keep the connection alive for\n// another request.\nssize_t parse_command(const void *data, size_t len, struct args *args, \n int *proto, bool *noreply, int *httpvers, bool *keepalive, struct pg **pg)\n{\n args_clear(args);\n parse_lasterr[0] = '\\0';\n *httpvers = 0;\n *noreply = false;\n *keepalive = false;\n // Sniff for the protocol. 
This should only happen once per client, upon\n // their first request.\n if (*proto == 0) {\n if (!sniff_proto(data, len, proto)) {\n // Unknown protocol\n goto fail;\n }\n if (*proto == 0) {\n // Not enough data to determine yet\n return 0;\n }\n }\n if (*proto == PROTO_RESP) {\n const uint8_t *bytes = data;\n if (bytes[0] == '*') {\n return parse_resp(data, len, args);\n } else {\n return parse_resp_telnet(data, len, args);\n }\n } else if (*proto == PROTO_MEMCACHE) {\n return parse_memcache(data, len, args, noreply);\n } else if (*proto == PROTO_HTTP) {\n return parse_http(data, len, args, httpvers, keepalive);\n } else if (*proto == PROTO_POSTGRES) {\n return parse_postgres(data, len, args, pg);\n }\nfail:\n parse_seterror(\"ERROR\");\n return -1;\n}\n\n"], ["/pogocache/src/buf.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit buf.c is a simple interface for creating byte buffers\n#include \n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"buf.h\"\n\nvoid buf_ensure(struct buf *buf, size_t len) {\n if (buf->len+len > buf->cap) {\n size_t oldcap = buf->cap;\n size_t newcap = buf->cap;\n if (oldcap == 0) {\n buf->data = 0;\n newcap = 16;\n } else {\n newcap *= 2;\n }\n while (buf->len+len > newcap) {\n newcap *= 2;\n }\n buf->data = xrealloc(buf->data, newcap);\n buf->cap = newcap;\n }\n}\n\nvoid buf_append(struct buf *buf, const void *data, size_t len){\n buf_ensure(buf, len);\n memcpy(buf->data+buf->len, data, len);\n buf->len += len;\n}\n\nvoid buf_append_byte(struct buf *buf, char byte) {\n if (buf->len < buf->cap) {\n buf->data[buf->len++] = byte;\n } else {\n buf_append(buf, &byte, 1);\n }\n}\n\nvoid buf_clear(struct buf 
*buf) {\n // No capacity means this buffer is owned somewhere else and we \n // must not free the data.\n if (buf->cap) {\n xfree(buf->data);\n }\n memset(buf, 0, sizeof(struct buf));\n}\n\nvoid buf_append_uvarint(struct buf *buf, uint64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_u64(buf->data+buf->len, x);\n buf->len += n;\n}\n\nvoid buf_append_varint(struct buf *buf, int64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_i64(buf->data+buf->len, x);\n buf->len += n;\n}\n"], ["/pogocache/src/tls.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit tls.c provides an interface for translating TLS bytes streams.\n// This is intended to be used with client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tls.h\"\n#include \"xmalloc.h\"\n#include \"openssl.h\"\n\n#ifdef NOOPENSSL\n\nvoid tls_init(void) {}\nbool tls_accept(int fd, struct tls **tls_out) {\n (void)fd;\n *tls_out = 0;\n return true;\n}\nint tls_close(struct tls *tls, int fd) {\n (void)tls;\n return close(fd);\n}\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n (void)tls;\n return read(fd, data, len);\n}\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n (void)tls;\n return write(fd, data, len);\n}\n#else\n\nextern const bool usetls;\nextern const char *tlscertfile;\nextern const char *tlscacertfile;\nextern const char *tlskeyfile;\n\nstatic SSL_CTX *ctx;\n\nstruct tls {\n SSL *ssl;\n};\n\nvoid tls_init(void) {\n if (!usetls) {\n return;\n }\n ctx = SSL_CTX_new(TLS_server_method());\n if (!SSL_CTX_load_verify_locations(ctx, tlscacertfile, 0)) {\n printf(\"# Error 
initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(1);\n }\n if (!SSL_CTX_use_certificate_file(ctx, tlscertfile , SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_use_PrivateKey_file(ctx, tlskeyfile, SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_check_private_key(ctx)) {\n printf(\"# tls: private key does not match the certificate\\n\");\n exit(EXIT_FAILURE);\n }\n}\n\nbool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n // tls is disabled for all of pogocache.\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}\n\nint tls_close(struct tls *tls, int fd) {\n if (tls) {\n if (SSL_shutdown(tls->ssl) == 0) {\n SSL_shutdown(tls->ssl);\n }\n SSL_free(tls->ssl);\n xfree(tls);\n }\n return close(fd);\n}\n\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n if (!tls) {\n return write(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_write_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try 
again later\n errno = EAGAIN;\n } else {\n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n if (!tls) {\n return read(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_read_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else { \n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\n#endif\n"], ["/pogocache/src/xmalloc.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit xmalloc.c is the primary allocator interface. 
The xmalloc/xfree\n// functions should be used instead of malloc/free.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#if defined(__linux__) && defined(__GLIBC__)\n#include \n#define HAS_MALLOC_H\n#endif\n\n// from main.c\nextern const int useallocator;\nextern const bool usetrackallocs;\n\n#ifdef NOTRACKALLOCS\n#define add_alloc()\n#define sub_alloc()\nsize_t xallocs(void) {\n return 0;\n}\n#else\nstatic atomic_int_fast64_t nallocs = 0;\n\nsize_t xallocs(void) {\n if (usetrackallocs) {\n return atomic_load(&nallocs);\n } else {\n return 0;\n }\n}\n\nstatic void add_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_add_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n\nstatic void sub_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_sub_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n#endif\n\nstatic void check_ptr(void *ptr) {\n if (!ptr) {\n fprintf(stderr, \"# %s\\n\", strerror(ENOMEM));\n abort();\n }\n}\n\nvoid *xmalloc(size_t size) {\n void *ptr = malloc(size);\n check_ptr(ptr);\n add_alloc();\n return ptr;\n}\n\nvoid *xrealloc(void *ptr, size_t size) {\n if (!ptr) {\n return xmalloc(size);\n }\n ptr = realloc(ptr, size);\n check_ptr(ptr);\n return ptr;\n}\n\nvoid xfree(void *ptr) {\n if (!ptr) {\n return;\n }\n free(ptr);\n sub_alloc();\n}\n\nvoid xpurge(void) {\n#ifdef HAS_MALLOC_H\n // Releases unused heap memory to OS\n malloc_trim(0);\n#endif\n}\n"], ["/pogocache/src/stats.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit stats.c tracks various stats. 
Mostly for the memcache protocol.\n#include \n#include \"stats.h\"\n\nstatic atomic_uint_fast64_t g_stat_cmd_flush = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_touch = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_meta = 0;\nstatic atomic_uint_fast64_t g_stat_get_expired = 0;\nstatic atomic_uint_fast64_t g_stat_get_flushed = 0;\nstatic atomic_uint_fast64_t g_stat_delete_misses = 0;\nstatic atomic_uint_fast64_t g_stat_delete_hits = 0;\nstatic atomic_uint_fast64_t g_stat_incr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_incr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_decr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_decr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_misses = 0;\nstatic atomic_uint_fast64_t g_stat_cas_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_badval = 0;\nstatic atomic_uint_fast64_t g_stat_touch_hits = 0;\nstatic atomic_uint_fast64_t g_stat_touch_misses = 0;\nstatic atomic_uint_fast64_t g_stat_store_too_large = 0;\nstatic atomic_uint_fast64_t g_stat_store_no_memory = 0;\nstatic atomic_uint_fast64_t g_stat_auth_cmds = 0;\nstatic atomic_uint_fast64_t g_stat_auth_errors = 0;\n\nvoid stat_cmd_flush_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_flush, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_touch_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_touch, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_meta_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_meta, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_expired_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_expired, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_flushed_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_flushed, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_hits_incr(struct conn *conn) {\n 
(void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_badval_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_badval, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_too_large_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_too_large, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_no_memory_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_no_memory, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_cmds_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_cmds, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_errors_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_errors, 1, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_flush(void) {\n return atomic_load_explicit(&g_stat_cmd_flush, 
__ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_touch(void) {\n return atomic_load_explicit(&g_stat_cmd_touch, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_meta(void) {\n return atomic_load_explicit(&g_stat_cmd_meta, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_expired(void) {\n return atomic_load_explicit(&g_stat_get_expired, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_flushed(void) {\n return atomic_load_explicit(&g_stat_get_flushed, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_misses(void) {\n return atomic_load_explicit(&g_stat_delete_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_hits(void) {\n return atomic_load_explicit(&g_stat_delete_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_misses(void) {\n return atomic_load_explicit(&g_stat_incr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_hits(void) {\n return atomic_load_explicit(&g_stat_incr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_misses(void) {\n return atomic_load_explicit(&g_stat_decr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_hits(void) {\n return atomic_load_explicit(&g_stat_decr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_misses(void) {\n return atomic_load_explicit(&g_stat_cas_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_hits(void) {\n return atomic_load_explicit(&g_stat_cas_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_badval(void) {\n return atomic_load_explicit(&g_stat_cas_badval, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_hits(void) {\n return atomic_load_explicit(&g_stat_touch_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_misses(void) {\n return atomic_load_explicit(&g_stat_touch_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_too_large(void) {\n return atomic_load_explicit(&g_stat_store_too_large, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_no_memory(void) {\n return atomic_load_explicit(&g_stat_store_no_memory, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_cmds(void) {\n return atomic_load_explicit(&g_stat_auth_cmds, 
__ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_errors(void) {\n return atomic_load_explicit(&g_stat_auth_errors, __ATOMIC_RELAXED);\n}\n\n\n"], ["/pogocache/src/uring.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n#include \n#include \"uring.h\"\n\nbool uring_available(void) {\n#ifdef NOURING\n return false;\n#else\n static atomic_int available = -1;\n if (atomic_load_explicit(&available, __ATOMIC_ACQUIRE )== -1) {\n struct io_uring ring;\n if (io_uring_queue_init(1, &ring, 0) == 0) {\n io_uring_queue_exit(&ring);\n atomic_store(&available, 1);\n } else {\n atomic_store(&available, 0);\n }\n }\n return atomic_load_explicit(&available, __ATOMIC_ACQUIRE) == 1;\n#endif\n}\n"]], "task_instance_info": {"created_time": "2025-08-20 18:04:05", "created_task_model": "DeepSeek-R1", "function_skeleton": "int main(void) {}", "code_description": "This code segment functions to: Serve as the entry point for executing performance benchmarks or functional tests based on environmental configuration, while initializing custom memory management procedures and coordinating validation routines for data structure operations."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```c\n[TASK_BEGIN]\nint main(void) {\n #ifdef BENCH\n benchmarks();\n #else\n all();\n #endif\n return 0;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 18-04-26"}, "editdistance_info": {"edit_distance": 22.7273, "calculate_time": "2025-08-20 18:04:26", "true_code_clean": "int main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c 
tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}", "predict_code_clean": "int main(void) {\n #ifdef BENCH\n benchmarks();\n #else\n all();\n #endif\n return 0;\n}"}} {"repo_name": "pogocache", "file_name": "/pogocache/src/tls.c", "inference_info": {"prefix_code": "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit tls.c provides an interface for translating TLS bytes streams.\n// This is intended to be used with client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tls.h\"\n#include \"xmalloc.h\"\n#include \"openssl.h\"\n\n#ifdef NOOPENSSL\n\nvoid tls_init(void) {}\nbool tls_accept(int fd, struct tls **tls_out) {\n (void)fd;\n *tls_out = 0;\n return true;\n}\nint tls_close(struct tls *tls, int fd) {\n (void)tls;\n return close(fd);\n}\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n (void)tls;\n return read(fd, data, len);\n}\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n (void)tls;\n return write(fd, data, len);\n}\n#else\n\nextern const bool usetls;\nextern const char *tlscertfile;\nextern const char *tlscacertfile;\nextern const char *tlskeyfile;\n\nstatic SSL_CTX *ctx;\n\nstruct tls {\n SSL *ssl;\n};\n\nvoid tls_init(void) {\n if (!usetls) {\n return;\n }\n ctx = SSL_CTX_new(TLS_server_method());\n if (!SSL_CTX_load_verify_locations(ctx, tlscacertfile, 0)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(1);\n }\n if (!SSL_CTX_use_certificate_file(ctx, tlscertfile , SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n 
ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_use_PrivateKey_file(ctx, tlskeyfile, SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_check_private_key(ctx)) {\n printf(\"# tls: private key does not match the certificate\\n\");\n exit(EXIT_FAILURE);\n }\n}\n\n", "suffix_code": "\n\nint tls_close(struct tls *tls, int fd) {\n if (tls) {\n if (SSL_shutdown(tls->ssl) == 0) {\n SSL_shutdown(tls->ssl);\n }\n SSL_free(tls->ssl);\n xfree(tls);\n }\n return close(fd);\n}\n\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n if (!tls) {\n return write(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_write_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else {\n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n if (!tls) {\n return read(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_read_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else { \n // Unreliable errno. 
Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\n#endif\n", "middle_code": "bool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "c", "sub_task_type": null}, "context_code": [["/pogocache/src/net.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit net.c provides most network functionality, including listening on ports,\n// thread creation, event queue handling, and reading & writing sockets.\n#define _GNU_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifdef __linux__\n#include \n#include \n#include \n#include \n#else\n#include \n#endif\n\n#include \"uring.h\"\n#include \"stats.h\"\n#include \"net.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"xmalloc.h\"\n\n#define PACKETSIZE 16384\n#define MINURINGEVENTS 2 // there must be at least 2 events for uring use\n\nextern const int verb;\n\nstatic int setnonblock(int fd) {\n int flags = fcntl(fd, F_GETFL, 0);\n if (flags == -1) {\n return -1;\n }\n return fcntl(fd, F_SETFL, flags | O_NONBLOCK);\n}\n\nstatic int settcpnodelay(int fd, bool nodelay) {\n int val = nodelay;\n return setsockopt(fd, SOL_SOCKET, TCP_NODELAY, &val, sizeof(val)) == 0;\n}\n\nstatic int setquickack(int fd, bool quickack) {\n#if defined(__linux__)\n int val = quickack;\n return setsockopt(fd, SOL_SOCKET, TCP_QUICKACK, &val, sizeof(val)) == 0;\n#else\n (void)fd, (void)quickack;\n return 0;\n#endif\n}\n\nstatic int setkeepalive(int fd, bool keepalive) {\n int val = keepalive;\n if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val))) {\n return -1;\n }\n#if defined(__linux__)\n if (!keepalive) {\n return 0;\n }\n // tcp_keepalive_time\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &(int){300}, sizeof(int))) \n {\n return -1;\n }\n // tcp_keepalive_intvl\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&(int){30}, sizeof(int)))\n {\n return -1;\n }\n // tcp_keepalive_probes\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &(int){3}, sizeof(int))) {\n return -1;\n }\n#endif\n return 0;\n}\n\n#ifdef __linux__\ntypedef struct epoll_event event_t;\n#else\ntypedef struct kevent event_t;\n#endif\n\nstatic int event_fd(event_t *ev) {\n#ifdef __linux__\n return ev->data.fd;\n#else\n return ev->ident;\n#endif\n}\n\nstatic int getevents(int fd, event_t evs[], int nevs, bool wait_forever, \n int64_t timeout)\n{\n if (wait_forever) {\n#ifdef __linux__\n return epoll_wait(fd, evs, nevs, -1);\n#else\n return kevent(fd, NULL, 0, evs, nevs, 0);\n#endif\n } else {\n timeout = timeout < 0 ? 0 : \n timeout > 900000000 ? 900000000 : // 900ms\n timeout;\n#ifdef __linux__\n timeout = timeout / 1000000;\n return epoll_wait(fd, evs, nevs, timeout);\n#else\n struct timespec timespec = { .tv_nsec = timeout };\n return kevent(fd, NULL, 0, evs, nevs, ×pec);\n#endif\n }\n}\n\nstatic int addread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN | EPOLLEXCLUSIVE;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int addwrite(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delwrite(int qfd, int fd) {\n#ifdef __linux__\n struct 
epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int evqueue(void) {\n#ifdef __linux__\n return epoll_create1(0);\n#else\n return kqueue();\n#endif\n}\n\nstruct bgworkctx { \n void (*work)(void *udata);\n void (*done)(struct net_conn *conn, void *udata);\n struct net_conn *conn;\n void *udata;\n bool writer;\n};\n\n// static void bgdone(struct bgworkctx *bgctx);\n\nstruct net_conn {\n int fd;\n struct net_conn *next; // for hashmap bucket\n bool closed;\n struct tls *tls;\n void *udata;\n char *out;\n size_t outlen;\n size_t outcap;\n struct bgworkctx *bgctx;\n struct qthreadctx *ctx;\n unsigned stat_cmd_get;\n unsigned stat_cmd_set;\n unsigned stat_get_hits;\n unsigned stat_get_misses;\n};\n\nstatic struct net_conn *conn_new(int fd, struct qthreadctx *ctx) {\n struct net_conn *conn = xmalloc(sizeof(struct net_conn));\n memset(conn, 0, sizeof(struct net_conn));\n conn->fd = fd;\n conn->ctx = ctx;\n return conn;\n}\n\nstatic void conn_free(struct net_conn *conn) {\n if (conn) {\n if (conn->out) {\n xfree(conn->out);\n }\n xfree(conn);\n }\n}\n\nvoid net_conn_out_ensure(struct net_conn *conn, size_t amount) {\n if (conn->outcap-conn->outlen >= amount) {\n return;\n }\n size_t cap = conn->outcap == 0 ? 
16 : conn->outcap * 2;\n while (cap-conn->outlen < amount) {\n cap *= 2;\n }\n char *out = xmalloc(cap);\n memcpy(out, conn->out, conn->outlen);\n xfree(conn->out);\n conn->out = out;\n conn->outcap = cap;\n}\n\nvoid net_conn_out_write_byte_nocheck(struct net_conn *conn, char byte) {\n conn->out[conn->outlen++] = byte;\n}\n\nvoid net_conn_out_write_byte(struct net_conn *conn, char byte) {\n if (conn->outcap == conn->outlen) {\n net_conn_out_ensure(conn, 1);\n }\n net_conn_out_write_byte_nocheck(conn, byte);\n}\n\nvoid net_conn_out_write_nocheck(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n memcpy(conn->out+conn->outlen, data, nbytes);\n conn->outlen += nbytes;\n}\n\nvoid net_conn_out_write(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n if (conn->outcap-conn->outlen < nbytes) {\n net_conn_out_ensure(conn, nbytes);\n }\n net_conn_out_write_nocheck(conn, data, nbytes);\n}\n\nchar *net_conn_out(struct net_conn *conn) {\n return conn->out;\n}\n\nsize_t net_conn_out_len(struct net_conn *conn) {\n return conn->outlen;\n}\n\nsize_t net_conn_out_cap(struct net_conn *conn) {\n return conn->outcap;\n}\n\nvoid net_conn_out_setlen(struct net_conn *conn, size_t len) {\n assert(len < conn->outcap);\n conn->outlen = len;\n}\n\n\nbool net_conn_isclosed(struct net_conn *conn) {\n return conn->closed;\n}\n\nvoid net_conn_close(struct net_conn *conn) {\n conn->closed = true;\n}\n\nvoid net_conn_setudata(struct net_conn *conn, void *udata) {\n conn->udata = udata;\n}\n\nvoid *net_conn_udata(struct net_conn *conn) {\n return conn->udata;\n}\n\nstatic uint64_t hashfd(int fd) {\n return mix13((uint64_t)fd);\n}\n\n// map of connections\nstruct cmap {\n struct net_conn **buckets;\n size_t nbuckets;\n size_t len;\n};\n\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn);\n\nstatic void cmap_grow(struct cmap *cmap) {\n struct cmap cmap2 = { 0 };\n cmap2.nbuckets = cmap->nbuckets*2;\n size_t size = cmap2.nbuckets * sizeof(struct net_conn*);\n 
cmap2.buckets = xmalloc(size);\n memset(cmap2.buckets, 0, cmap2.nbuckets*sizeof(struct net_conn*));\n for (size_t i = 0; i < cmap->nbuckets; i++) {\n struct net_conn *conn = cmap->buckets[i];\n while (conn) {\n struct net_conn *next = conn->next;\n conn->next = 0;\n cmap_insert(&cmap2, conn);\n conn = next;\n }\n }\n xfree(cmap->buckets);\n memcpy(cmap, &cmap2, sizeof(struct cmap));\n}\n\n// Insert a connection into a map. \n// The connection MUST NOT exist in the map.\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n if (cmap->len >= cmap->nbuckets-(cmap->nbuckets>>2)) { // 75% load factor\n // if (cmap->len >= cmap->nbuckets) { // 100% load factor\n cmap_grow(cmap);\n }\n size_t i = hash % cmap->nbuckets;\n conn->next = cmap->buckets[i];\n cmap->buckets[i] = conn;\n cmap->len++;\n}\n\n// Return the connection or NULL if not exists.\nstatic struct net_conn *cmap_get(struct cmap *cmap, int fd) {\n uint32_t hash = hashfd(fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *conn = cmap->buckets[i];\n while (conn && conn->fd != fd) {\n conn = conn->next;\n }\n return conn;\n}\n\n// Delete connection from map. 
\n// The connection MUST exist in the map.\nstatic void cmap_delete(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *prev = 0;\n struct net_conn *iter = cmap->buckets[i];\n while (iter != conn) {\n prev = iter;\n iter = iter->next;\n }\n if (prev) {\n prev->next = iter->next;\n } else {\n cmap->buckets[i] = iter->next;\n }\n}\n\nstatic atomic_size_t nconns = 0;\nstatic atomic_size_t tconns = 0;\nstatic atomic_size_t rconns = 0;\n\nstatic pthread_mutex_t tls_ready_fds_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic int tls_ready_fds_cap = 0;\nstatic int tls_ready_fds_len = 0;\nstatic int *tls_ready_fds = 0;\n\nstatic void save_tls_fd(int fd) {\n pthread_mutex_lock(&tls_ready_fds_lock);\n if (tls_ready_fds_len == tls_ready_fds_cap) {\n tls_ready_fds_cap *= 2;\n if (tls_ready_fds_cap == 0) {\n tls_ready_fds_cap = 8;\n }\n tls_ready_fds = xrealloc(tls_ready_fds, tls_ready_fds_cap*sizeof(int));\n }\n tls_ready_fds[tls_ready_fds_len++] = fd;\n pthread_mutex_unlock(&tls_ready_fds_lock);\n}\n\nstatic bool del_tls_fd(int fd) {\n bool found = false;\n pthread_mutex_lock(&tls_ready_fds_lock);\n for (int i = 0; i < tls_ready_fds_len; i++) {\n if (tls_ready_fds[i] == fd) {\n tls_ready_fds[i] = tls_ready_fds[tls_ready_fds_len-1];\n tls_ready_fds_len--;\n found = true;\n break;\n }\n }\n pthread_mutex_unlock(&tls_ready_fds_lock);\n return found;\n}\n\nstruct qthreadctx {\n pthread_t th;\n int qfd;\n int index;\n int maxconns;\n int *sfd; // three entries\n bool tcpnodelay;\n bool keepalive;\n bool quickack;\n int queuesize;\n const char *unixsock;\n void *udata;\n bool uring;\n#ifndef NOURING\n struct io_uring ring;\n#endif\n void(*data)(struct net_conn*,const void*,size_t,void*);\n void(*opened)(struct net_conn*,void*);\n void(*closed)(struct net_conn*,void*);\n int nevents;\n event_t *events;\n atomic_int nconns;\n int ntlsconns;\n char *inpkts;\n struct net_conn **qreads;\n struct net_conn 
**qins;\n struct net_conn **qattachs;\n struct net_conn **qouts;\n struct net_conn **qcloses;\n char **qinpkts;\n int *qinpktlens; \n int nqreads;\n int nqins;\n int nqcloses;\n int nqattachs;\n int nqouts;\n int nthreads;\n \n uint64_t stat_cmd_get;\n uint64_t stat_cmd_set;\n uint64_t stat_get_hits;\n uint64_t stat_get_misses;\n\n struct qthreadctx *ctxs;\n struct cmap cmap;\n};\n\nstatic atomic_uint_fast64_t g_stat_cmd_get = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_set = 0;\nstatic atomic_uint_fast64_t g_stat_get_hits = 0;\nstatic atomic_uint_fast64_t g_stat_get_misses = 0;\n\ninline\nstatic void sumstats(struct net_conn *conn, struct qthreadctx *ctx) {\n ctx->stat_cmd_get += conn->stat_cmd_get;\n conn->stat_cmd_get = 0;\n ctx->stat_cmd_set += conn->stat_cmd_set;\n conn->stat_cmd_set = 0;\n ctx->stat_get_hits += conn->stat_get_hits;\n conn->stat_get_hits = 0;\n ctx->stat_get_misses += conn->stat_get_misses;\n conn->stat_get_misses = 0;\n}\n\ninline\nstatic void sumstats_global(struct qthreadctx *ctx) {\n atomic_fetch_add_explicit(&g_stat_cmd_get, ctx->stat_cmd_get, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_get = 0;\n atomic_fetch_add_explicit(&g_stat_cmd_set, ctx->stat_cmd_set, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_set = 0;\n atomic_fetch_add_explicit(&g_stat_get_hits, ctx->stat_get_hits, \n __ATOMIC_RELAXED);\n ctx->stat_get_hits = 0;\n atomic_fetch_add_explicit(&g_stat_get_misses, ctx->stat_get_misses, \n __ATOMIC_RELAXED);\n ctx->stat_get_misses = 0;\n}\n\nuint64_t stat_cmd_get(void) {\n uint64_t x = atomic_load_explicit(&g_stat_cmd_get, __ATOMIC_RELAXED);\n return x;\n}\n\nuint64_t stat_cmd_set(void) {\n return atomic_load_explicit(&g_stat_cmd_set, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_hits(void) {\n return atomic_load_explicit(&g_stat_get_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_misses(void) {\n return atomic_load_explicit(&g_stat_get_misses, __ATOMIC_RELAXED);\n}\n\ninline\nstatic void qreset(struct qthreadctx *ctx) {\n ctx->nqreads = 0;\n 
ctx->nqins = 0;\n ctx->nqcloses = 0;\n ctx->nqouts = 0;\n ctx->nqattachs = 0;\n}\n\ninline\nstatic void qaccept(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nevents; i++) {\n int fd = event_fd(&ctx->events[i]);\n struct net_conn *conn = cmap_get(&ctx->cmap, fd);\n if (!conn) {\n if ((fd == ctx->sfd[0] || fd == ctx->sfd[1] || fd == ctx->sfd[2])) {\n int sfd = fd;\n fd = accept(fd, 0, 0);\n if (fd == -1) {\n continue;\n }\n if (setnonblock(fd) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[0] || sfd == ctx->sfd[2]) {\n if (setkeepalive(fd, ctx->keepalive) == -1) {\n close(fd);\n continue;\n }\n if (settcpnodelay(fd, ctx->tcpnodelay) == -1) {\n close(fd);\n continue;\n }\n if (setquickack(fd, ctx->quickack) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[2]) {\n save_tls_fd(fd);\n }\n }\n static atomic_uint_fast64_t next_ctx_index = 0;\n int idx = atomic_fetch_add(&next_ctx_index, 1) % ctx->nthreads;\n if (addread(ctx->ctxs[idx].qfd, fd) == -1) {\n if (sfd == ctx->sfd[2]) {\n del_tls_fd(fd);\n }\n close(fd);\n continue;\n }\n continue;\n }\n size_t xnconns = atomic_fetch_add(&nconns, 1);\n if (xnconns >= (size_t)ctx->maxconns) {\n // rejected\n atomic_fetch_add(&rconns, 1);\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n continue;\n }\n bool istls = del_tls_fd(fd);\n conn = conn_new(fd, ctx);\n if (istls) {\n if (!tls_accept(conn->fd, &conn->tls)) {\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n conn_free(conn);\n continue;\n }\n ctx->ntlsconns++;\n }\n atomic_fetch_add_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_add_explicit(&tconns, 1, __ATOMIC_RELEASE);\n cmap_insert(&ctx->cmap, conn);\n ctx->opened(conn, ctx->udata);\n }\n if (conn->bgctx) {\n // BGWORK(2)\n // The connection has been added back to the event loop, but it\n // needs to be attached and restated.\n ctx->qattachs[ctx->nqattachs++] = conn;\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n 
ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void handle_read(ssize_t n, char *pkt, struct net_conn *conn,\n struct qthreadctx *ctx)\n{\n assert(conn->outlen == 0);\n assert(conn->bgctx == 0);\n if (n <= 0) {\n if (n == 0 || errno != EAGAIN) {\n // read failed, close connection\n ctx->qcloses[ctx->nqcloses++] = conn;\n return;\n }\n assert(n == -1 && errno == EAGAIN);\n // even though there's an EAGAIN, still call the user data event\n // handler with an empty packet \n n = 0;\n }\n pkt[n] = '\\0';\n ctx->qins[ctx->nqins] = conn;\n ctx->qinpkts[ctx->nqins] = pkt;\n ctx->qinpktlens[ctx->nqins] = n;\n ctx->nqins++;\n}\n\ninline \nstatic void flush_conn(struct net_conn *conn, size_t written) {\n while (written < conn->outlen) {\n ssize_t n;\n if (conn->tls) {\n n = tls_write(conn->tls, conn->fd, conn->out+written, \n conn->outlen-written);\n } else {\n n = write(conn->fd, conn->out+written, conn->outlen-written);\n }\n if (n == -1) {\n if (errno == EAGAIN) {\n continue;\n }\n conn->closed = true;\n break;\n }\n written += n;\n }\n // either everything was written or the socket is closed\n conn->outlen = 0;\n}\n\ninline\nstatic void qattach(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nqattachs; i++) {\n // BGWORK(3)\n // A bgworker has finished, make sure it's added back into the \n // event loop in the correct state.\n struct net_conn *conn = ctx->qattachs[i];\n struct bgworkctx *bgctx = conn->bgctx;\n bgctx->done(conn, bgctx->udata);\n conn->bgctx = 0;\n assert(bgctx);\n xfree(bgctx);\n int ret = delwrite(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n ret = addread(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void qread(struct qthreadctx *ctx) {\n // Read incoming socket data\n#ifndef NOURING\n if 
(ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // read incoming using uring\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_read(sqe, conn->fd, pkt, PACKETSIZE-1, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n assert(ret == ctx->nqreads);\n for (int i = 0; i < ctx->nqreads; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n = cqe->res;\n if (n < 0) {\n errno = -n;\n n = -1;\n }\n handle_read(n, pkt, conn, ctx);\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // read incoming data using standard syscalls.\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n;\n if (conn->tls) {\n n = tls_read(conn->tls, conn->fd, pkt, PACKETSIZE-1);\n } else {\n n = read(conn->fd, pkt, PACKETSIZE-1);\n }\n handle_read(n, pkt, conn, ctx);\n }\n#ifndef NOURING\n }\n#endif\n}\n\n\ninline\nstatic void qprocess(struct qthreadctx *ctx) {\n // process all new incoming data\n for (int i = 0; i < ctx->nqins; i++) {\n struct net_conn *conn = ctx->qins[i];\n char *p = ctx->qinpkts[i];\n int n = ctx->qinpktlens[i];\n ctx->data(conn, p, n, ctx->udata);\n sumstats(conn, ctx);\n if (conn->bgctx) {\n // BGWORK(1)\n // Connection entered background mode.\n // This means the connection is no longer in the event queue but\n // is still owned by this qthread. 
Once the bgwork is done the \n // connection will be added back to the queue with addwrite.\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n}\n\ninline\nstatic void qprewrite(struct qthreadctx *ctx) {\n (void)ctx;\n // TODO: perform any prewrite operations\n}\n\ninline\nstatic void qwrite(struct qthreadctx *ctx) {\n // Flush all outgoing socket data.\n#ifndef NOURING\n if (ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // write outgoing using uring\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_write(sqe, conn->fd, conn->out, conn->outlen, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n for (int i = 0; i < ctx->nqouts; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qouts[i];\n ssize_t n = cqe->res;\n if (n == -EAGAIN) {\n n = 0;\n }\n if (n < 0) {\n conn->closed = true;\n } else {\n // Any extra data must be flushed using syscall write.\n flush_conn(conn, n);\n }\n // Either everything was written or the socket is closed\n conn->outlen = 0;\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // Write data using write syscall\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n#ifndef NOURING\n }\n#endif\n}\n\ninline\nstatic void qclose(struct qthreadctx *ctx) {\n // Close all sockets that need to be closed\n for (int i = 0; i < ctx->nqcloses; i++) {\n struct net_conn *conn = ctx->qcloses[i];\n ctx->closed(conn, ctx->udata);\n if 
(conn->tls) {\n tls_close(conn->tls, conn->fd);\n ctx->ntlsconns--;\n } else {\n close(conn->fd);\n }\n cmap_delete(&ctx->cmap, conn);\n atomic_fetch_sub_explicit(&nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_sub_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n conn_free(conn);\n }\n}\n\nstatic void *qthread(void *arg) {\n struct qthreadctx *ctx = arg;\n#ifndef NOURING\n if (ctx->uring) {\n if (io_uring_queue_init(ctx->queuesize, &ctx->ring, 0) < 0) {\n perror(\"# io_uring_queue_init\");\n abort();\n }\n }\n#endif\n // connection map\n memset(&ctx->cmap, 0, sizeof(struct cmap));\n ctx->cmap.nbuckets = 64;\n size_t size = ctx->cmap.nbuckets*sizeof(struct net_conn*);\n ctx->cmap.buckets = xmalloc(size);\n memset(ctx->cmap.buckets, 0, ctx->cmap.nbuckets*sizeof(struct net_conn*));\n\n ctx->events = xmalloc(sizeof(event_t)*ctx->queuesize);\n ctx->qreads = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->inpkts = xmalloc(PACKETSIZE*ctx->queuesize);\n ctx->qins = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qinpkts = xmalloc(sizeof(char*)*ctx->queuesize);\n ctx->qinpktlens = xmalloc(sizeof(int)*ctx->queuesize);\n ctx->qcloses = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qouts = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qattachs = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n\n while (1) {\n sumstats_global(ctx);\n ctx->nevents = getevents(ctx->qfd, ctx->events, ctx->queuesize, 1, 0);\n if (ctx->nevents <= 0) {\n if (ctx->nevents == -1 && errno != EINTR) {\n perror(\"# getevents\");\n abort();\n }\n continue;\n }\n // reset, accept, attach, read, process, prewrite, write, close\n qreset(ctx); // reset the step queues\n qaccept(ctx); // accept incoming connections\n qattach(ctx); // attach bg workers. 
uncommon\n qread(ctx); // read from sockets\n qprocess(ctx); // process new socket data\n qprewrite(ctx); // perform any prewrite operations, such as fsync\n qwrite(ctx); // write to sockets\n qclose(ctx); // close any sockets that need closing\n }\n return 0;\n}\n\nstatic int listen_tcp(const char *host, const char *port, bool reuseport, \n int backlog)\n{\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return 0;\n }\n int ret;\n host = host ? host : \"127.0.0.1\";\n port = port ? port : \"0\";\n struct addrinfo hints = { 0 }, *addrs;\n hints.ai_family = AF_UNSPEC; \n hints.ai_socktype = SOCK_STREAM;\n hints.ai_protocol = IPPROTO_TCP;\n ret = getaddrinfo(host, port, &hints, &addrs);\n if (ret != 0) {\n fprintf(stderr, \"# getaddrinfo: %s: %s:%s\", gai_strerror(ret), host,\n port);\n abort();\n }\n struct addrinfo *ainfo = addrs;\n while (ainfo->ai_family != PF_INET) {\n ainfo = ainfo->ai_next;\n }\n assert(ainfo);\n int fd = socket(ainfo->ai_family, ainfo->ai_socktype, ainfo->ai_protocol);\n if (fd == -1) {\n perror(\"# socket(tcp)\");\n abort();\n }\n if (reuseport) {\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, \n sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseport)\");\n abort();\n }\n }\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &(int){1},sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseaddr)\");\n abort();\n }\n ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n ret = bind(fd, ainfo->ai_addr, ainfo->ai_addrlen);\n if (ret == -1) {\n fprintf(stderr, \"# bind(tcp): %s:%s\", host, port);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(tcp): %s:%s\", host, port);\n abort();\n }\n freeaddrinfo(addrs);\n return fd;\n}\n\nstatic int listen_unixsock(const char *unixsock, int backlog) {\n if (!unixsock || !*unixsock) {\n return 0;\n }\n struct sockaddr_un unaddr;\n int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n if (fd == -1) {\n 
perror(\"# socket(unix)\");\n abort();\n }\n memset(&unaddr, 0, sizeof(struct sockaddr_un));\n unaddr.sun_family = AF_UNIX;\n strncpy(unaddr.sun_path, unixsock, sizeof(unaddr.sun_path) - 1);\n int ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n unlink(unixsock);\n ret = bind(fd, (struct sockaddr *)&unaddr, sizeof(struct sockaddr_un));\n if (ret == -1) {\n fprintf(stderr, \"# bind(unix): %s\", unixsock);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(unix): %s\", unixsock);\n abort();\n }\n return fd;\n}\n\nstatic atomic_uintptr_t all_ctxs = 0;\n\n// current connections\nsize_t net_nconns(void) {\n return atomic_load_explicit(&nconns, __ATOMIC_ACQUIRE);\n}\n\n// total connections ever\nsize_t net_tconns(void) {\n return atomic_load_explicit(&tconns, __ATOMIC_ACQUIRE);\n}\n\n// total rejected connections ever\nsize_t net_rconns(void) {\n return atomic_load_explicit(&rconns, __ATOMIC_ACQUIRE);\n}\n\nstatic void warmupunix(const char *unixsock, int nsocks) {\n if (!unixsock || !*unixsock) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n socks[i] = socket(AF_UNIX, SOCK_STREAM, 0);\n if (socks[i] == -1) {\n socks[i] = 0;\n continue;\n }\n struct sockaddr_un addr;\n memset(&addr, 0, sizeof(struct sockaddr_un));\n addr.sun_family = AF_UNIX;\n strncpy(addr.sun_path, unixsock, sizeof(addr.sun_path) - 1);\n if (connect(socks[i], (struct sockaddr *)&addr, \n sizeof(struct sockaddr_un)) == -1)\n {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup unix socket (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\n\nstatic void warmuptcp(const char *host, const char *port, int nsocks) {\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n struct addrinfo hints, *res;\n memset(&hints, 0, sizeof(hints));\n hints.ai_family = AF_INET;\n hints.ai_socktype = SOCK_STREAM;\n int err = getaddrinfo(host, port, &hints, &res);\n if (err != 0) {\n continue;\n }\n socks[i] = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n if (socks[i] == -1) {\n freeaddrinfo(res);\n continue;\n }\n int ret = connect(socks[i], res->ai_addr, res->ai_addrlen);\n freeaddrinfo(res);\n if (ret == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup tcp (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\nstatic void *thwarmup(void *arg) {\n // Perform a warmup of the epoll queues and listeners by making a quick\n // connection to each.\n struct net_opts *opts = arg;\n warmupunix(opts->unixsock, opts->nthreads*2);\n warmuptcp(opts->host, opts->port, opts->nthreads*2);\n return 0;\n}\n\nvoid net_main(struct net_opts *opts) {\n (void)delread;\n int sfd[3] = {\n listen_tcp(opts->host, opts->port, opts->reuseport, opts->backlog),\n listen_unixsock(opts->unixsock, opts->backlog),\n listen_tcp(opts->host, opts->tlsport, opts->reuseport, opts->backlog),\n };\n if (!sfd[0] && !sfd[1] && !sfd[2]) {\n printf(\"# No listeners provided\\n\");\n abort();\n }\n opts->listening(opts->udata);\n struct qthreadctx *ctxs = xmalloc(sizeof(struct qthreadctx)*opts->nthreads);\n memset(ctxs, 0, sizeof(struct qthreadctx)*opts->nthreads);\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n ctx->nthreads = opts->nthreads;\n ctx->tcpnodelay = opts->tcpnodelay;\n ctx->keepalive = opts->keepalive;\n ctx->quickack = opts->quickack;\n ctx->uring = !opts->nouring;\n ctx->ctxs = ctxs;\n ctx->index = i;\n ctx->maxconns = opts->maxconns;\n ctx->sfd = sfd;\n ctx->data = opts->data;\n ctx->udata = opts->udata;\n ctx->opened = opts->opened;\n ctx->closed = opts->closed;\n ctx->qfd = evqueue();\n if (ctx->qfd == -1) {\n perror(\"# evqueue\");\n abort();\n }\n atomic_init(&ctx->nconns, 0);\n for (int j = 0; j < 3; j++) {\n if (sfd[j]) {\n int ret = addread(ctx->qfd, sfd[j]);\n if (ret == -1) {\n perror(\"# addread\");\n abort();\n }\n }\n }\n ctx->unixsock = opts->unixsock;\n ctx->queuesize = opts->queuesize;\n }\n atomic_store(&all_ctxs, (uintptr_t)(void*)ctxs);\n opts->ready(opts->udata);\n if (!opts->nowarmup) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thwarmup, opts);\n if (ret != -1) {\n pthread_detach(th);\n }\n }\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n 
if (i == opts->nthreads-1) {\n qthread(ctx);\n } else {\n int ret = pthread_create(&ctx->th, 0, qthread, ctx);\n if (ret == -1) {\n perror(\"# pthread_create\");\n abort();\n }\n }\n }\n}\n\nstatic void *bgwork(void *arg) {\n struct bgworkctx *bgctx = arg;\n bgctx->work(bgctx->udata);\n // We are not in the same thread context as the event loop that owns this\n // connection. Adding the writer to the queue will allow for the loop\n // thread to gracefully continue the operation and then call the 'done'\n // callback.\n int ret = addwrite(bgctx->conn->ctx->qfd, bgctx->conn->fd);\n assert(ret == 0); (void)ret;\n return 0;\n}\n\n// net_conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool net_conn_bgwork(struct net_conn *conn, void (*work)(void *udata), \n void (*done)(struct net_conn *conn, void *udata), void *udata)\n{\n if (conn->bgctx || conn->closed) {\n return false;\n }\n struct qthreadctx *ctx = conn->ctx;\n int ret = delread(ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n conn->bgctx = xmalloc(sizeof(struct bgworkctx));\n memset(conn->bgctx, 0, sizeof(struct bgworkctx));\n conn->bgctx->conn = conn;\n conn->bgctx->done = done;\n conn->bgctx->work = work;\n conn->bgctx->udata = udata;\n pthread_t th;\n if (pthread_create(&th, 0, bgwork, conn->bgctx) == -1) {\n // Failed to create thread. 
Revert and return false.\n ret = addread(ctx->qfd, conn->fd);\n assert(ret == 0);\n xfree(conn->bgctx);\n conn->bgctx = 0;\n return false;\n } else {\n pthread_detach(th);\n }\n return true;\n}\n\nbool net_conn_bgworking(struct net_conn *conn) {\n return conn->bgctx != 0;\n}\n\nvoid net_stat_cmd_get_incr(struct net_conn *conn) {\n conn->stat_cmd_get++;\n}\n\nvoid net_stat_cmd_set_incr(struct net_conn *conn) {\n conn->stat_cmd_set++;\n}\n\nvoid net_stat_get_hits_incr(struct net_conn *conn) {\n conn->stat_get_hits++;\n}\n\nvoid net_stat_get_misses_incr(struct net_conn *conn) {\n conn->stat_get_misses++;\n}\n\nbool net_conn_istls(struct net_conn *conn) {\n return conn->tls != 0;\n}\n"], ["/pogocache/src/main.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit main.c is the main entry point for the Pogocache program.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"conn.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"save.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"pogocache.h\"\n#include \"gitinfo.h\"\n#include \"uring.h\"\n\n// default user flags\nint nthreads = 0; // number of client threads\nchar *port = \"9401\"; // default tcp port (non-tls)\nchar *host = \"127.0.0.1\"; // default hostname or ip address\nchar *persist = \"\"; // file to load and save data to\nchar *unixsock = \"\"; // use a unix socket\nchar *reuseport = \"no\"; // reuse tcp port for other programs\nchar *tcpnodelay = \"yes\"; // disable nagle's algorithm\nchar *quickack = \"no\"; // enable 
quick acks\nchar *usecas = \"no\"; // enable compare and store\nchar *keepalive = \"yes\"; // socket keepalive setting\nint backlog = 1024; // network socket accept backlog\nint queuesize = 128; // event queue size\nchar *maxmemory = \"80%\"; // Maximum memory allowed - 80% total system\nchar *evict = \"yes\"; // evict keys when maxmemory reached\nint loadfactor = 75; // hashmap load factor\nchar *keysixpack = \"yes\"; // use sixpack compression on keys\nchar *trackallocs = \"no\"; // track allocations (for debugging)\nchar *auth = \"\"; // auth token or pa\nchar *tlsport = \"\"; // enable tls over tcp port\nchar *tlscertfile = \"\"; // tls cert file\nchar *tlskeyfile = \"\"; // tls key file\nchar *tlscacertfile = \"\"; // tls ca cert file\nchar *uring = \"yes\"; // use uring (linux only)\nint maxconns = 1024; // maximum number of sockets\nchar *noticker = \"no\";\nchar *warmup = \"yes\";\n\n// Global variables calculated in main().\n// These should never change during the lifetime of the process.\n// Other source files must use the \"extern const\" specifier.\nchar *version;\nchar *githash;\nuint64_t seed;\nsize_t sysmem;\nsize_t memlimit;\nint verb; // verbosity, 0=no, 1=verbose, 2=very, 3=extremely\nbool usesixpack;\nint useallocator;\nbool usetrackallocs;\nbool useevict;\nint nshards;\nbool usetls; // use tls security (pemfile required);\nbool useauth; // use auth password\nbool usecolor; // allow color in terminal\nchar *useid; // instance id (unique to every process run)\nint64_t procstart; // proc start boot time, for uptime stat\n\n// Global atomic variable. 
These are safe to read and modify by other source\n// files, as long as those sources use \"atomic_\" methods.\natomic_int shutdownreq; // shutdown request counter\natomic_int_fast64_t flush_delay; // delay in seconds to next async flushall\natomic_bool sweep; // mark for async sweep, asap\natomic_bool registered; // registration is active\natomic_bool lowmem; // system is in low memory mode.\n\nstruct pogocache *cache;\n\n// min max robinhood load factor (75% performs pretty well)\n#define MINLOADFACTOR_RH 55\n#define MAXLOADFACTOR_RH 95\n\nstatic void ready(void *udata) {\n (void)udata;\n printf(\"* Ready to accept connections\\n\");\n}\n\n#define noopt \"%s\"\n\n#define HELP(format, ...) \\\n fprintf(file, format, ##__VA_ARGS__)\n\n#define HOPT(opt, desc, format, ...) \\\n fprintf(file, \" \"); \\\n fprintf(file, \"%-22s \", opt); \\\n fprintf(file, \"%-30s \", desc); \\\n if (strcmp(format, noopt) != 0) { \\\n fprintf(file, \"(default: \" format \")\", ##__VA_ARGS__); \\\n } \\\n fprintf(file, \"\\n\");\n\nstatic int calc_nshards(int nprocs) {\n switch (nprocs) {\n case 1: return 64;\n case 2: return 128;\n case 3: return 256;\n case 4: return 512;\n case 5: return 1024;\n case 6: return 2048;\n default: return 4096;\n }\n}\n\nstatic void showhelp(FILE *file) {\n int nprocs = sys_nprocs();\n int nshards = calc_nshards(nprocs);\n\n HELP(\"Usage: %s [options]\\n\", \"pogocache\");\n HELP(\"\\n\");\n\n HELP(\"Basic options:\\n\");\n HOPT(\"-h hostname\", \"listening host\", \"%s\", host);\n HOPT(\"-p port\", \"listening port\", \"%s\", port);\n HOPT(\"-s socket\", \"unix socket file\", \"%s\", *unixsock?unixsock:\"none\");\n\n HOPT(\"-v,-vv,-vvv\", \"verbose logging level\", noopt, \"\");\n HELP(\"\\n\");\n \n HELP(\"Additional options:\\n\");\n HOPT(\"--threads count\", \"number of threads\", \"%d\", nprocs);\n HOPT(\"--maxmemory value\", \"set max memory usage\", \"%s\", maxmemory);\n HOPT(\"--evict yes/no\", \"evict keys at maxmemory\", \"%s\", evict);\n 
HOPT(\"--persist path\", \"persistence file\", \"%s\", *persist?persist:\"none\");\n HOPT(\"--maxconns conns\", \"maximum connections\", \"%d\", maxconns);\n HELP(\"\\n\");\n \n HELP(\"Security options:\\n\");\n HOPT(\"--auth passwd\", \"auth token or password\", \"%s\", *auth?auth:\"none\");\n#ifndef NOOPENSSL\n HOPT(\"--tlsport port\", \"enable tls on port\", \"%s\", \"none\");\n HOPT(\"--tlscert certfile\", \"tls cert file\", \"%s\", \"none\");\n HOPT(\"--tlskey keyfile\", \"tls key file\", \"%s\", \"none\");\n HOPT(\"--tlscacert cacertfile\", \"tls ca-cert file\", \"%s\", \"none\");\n#endif\n HELP(\"\\n\");\n\n HELP(\"Advanced options:\\n\");\n HOPT(\"--shards count\", \"number of shards\", \"%d\", nshards);\n HOPT(\"--backlog count\", \"accept backlog\", \"%d\", backlog);\n HOPT(\"--queuesize count\", \"event queuesize size\", \"%d\", queuesize);\n HOPT(\"--reuseport yes/no\", \"reuseport for tcp\", \"%s\", reuseport);\n HOPT(\"--tcpnodelay yes/no\", \"disable nagle's algo\", \"%s\", tcpnodelay);\n HOPT(\"--quickack yes/no\", \"use quickack (linux)\", \"%s\", quickack);\n HOPT(\"--uring yes/no\", \"use uring (linux)\", \"%s\", uring);\n HOPT(\"--loadfactor percent\", \"hashmap load factor\", \"%d\", loadfactor);\n HOPT(\"--keysixpack yes/no\", \"sixpack compress keys\", \"%s\", keysixpack);\n HOPT(\"--cas yes/no\", \"use compare and store\", \"%s\", usecas);\n HELP(\"\\n\");\n}\n\nstatic void showversion(FILE *file) {\n#ifdef CCSANI\n fprintf(file, \"pogocache %s (CCSANI)\\n\", version);\n#else\n fprintf(file, \"pogocache %s\\n\", version);\n#endif\n}\n\nstatic size_t calc_memlimit(char *maxmemory) {\n if (strcmp(maxmemory, \"unlimited\") == 0) {\n return SIZE_MAX;\n }\n char *oval = maxmemory;\n while (isspace(*maxmemory)) {\n maxmemory++;\n }\n char *end;\n errno = 0;\n double mem = strtod(maxmemory, &end);\n if (errno || !(mem > 0) || !isfinite(mem)) {\n goto fail;\n }\n while (isspace(*end)) {\n end++;\n }\n #define exteq(c) \\\n (tolower(end[0])==c&& 
(!end[1]||(tolower(end[1])=='b'&&!end[2])))\n\n if (strcmp(end, \"\") == 0) {\n return mem;\n } else if (strcmp(end, \"%\") == 0) {\n return (((double)mem)/100.0) * sysmem;\n } else if (exteq('k')) {\n return mem*1024.0;\n } else if (exteq('m')) {\n return mem*1024.0*1024.0;\n } else if (exteq('g')) {\n return mem*1024.0*1024.0*1024.0;\n } else if (exteq('t')) {\n return mem*1024.0*1024.0*1024.0*1024.0;\n }\nfail:\n fprintf(stderr, \"# Invalid maxmemory '%s'\\n\", oval);\n showhelp(stderr);\n exit(1);\n}\n\nstatic size_t setmaxrlimit(void) {\n size_t maxconns = 0;\n struct rlimit rl;\n if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {\n maxconns = rl.rlim_max;\n rl.rlim_cur = rl.rlim_max;\n rl.rlim_max = rl.rlim_max;\n if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {\n perror(\"# setrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n } else {\n perror(\"# getrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n return maxconns;\n}\n\nstatic void evicted(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)value, (void)valuelen, (void)expires, (void)udata;\n return;\n printf(\". 
evicted shard=%d, reason=%d, time=%\" PRIi64 \", key='%.*s'\"\n \", flags=%\" PRIu32 \", cas=%\" PRIu64 \"\\n\",\n shard, reason, time, (int)keylen, (char*)key, flags, cas);\n}\n\n#define BEGIN_FLAGS() \\\n if (0) {\n#define BFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option %s missing value\\n\", opt); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n }\n#define TFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n if (!dryrun) { \\\n op; \\\n }\n#define AFLAG(name, op) \\\n } else if (strcmp(argv[i], \"--\" name) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option --%s missing value\\n\", name); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n } \\\n } else if (strstr(argv[i], \"--\" name \"=\") == argv[i]) { \\\n if (!dryrun) { \\\n char *flag = argv[i]+strlen(name)+3; op; \\\n }\n#define END_FLAGS() \\\n } else { \\\n fprintf(stderr, \"# Unknown program option %s\\n\", argv[i]); \\\n exit(1); \\\n }\n\n#define INVALID_FLAG(name, value) \\\n fprintf(stderr, \"# Option --%s is invalid\\n\", name); \\\n exit(1);\n\nstatic atomic_bool loaded = false;\n\nvoid sigterm(int sig) {\n if (sig == SIGINT || sig == SIGTERM) {\n if (!atomic_load(&loaded) || !*persist) {\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n if (*persist) {\n printf(\"* Saving data to %s, please wait...\\n\", persist);\n int ret = save(persist, true);\n if (ret != 0) {\n perror(\"# Save failed\");\n _Exit(1);\n }\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n\n int count = atomic_fetch_add(&shutdownreq, 1);\n if (count > 0 && sig == SIGINT) {\n printf(\"# User forced shutdown\\n\");\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n }\n}\n\nstatic void tick(void) {\n if (!atomic_load_explicit(&loaded, __ATOMIC_ACQUIRE)) {\n return;\n }\n // Memory usage check\n if (memlimit < SIZE_MAX) {\n struct sys_meminfo 
meminfo;\n sys_getmeminfo(&meminfo);\n size_t memusage = meminfo.rss;\n if (!lowmem) {\n if (memusage > memlimit) {\n atomic_store(&lowmem, true);\n if (verb > 0) {\n printf(\"# Low memory mode on\\n\");\n }\n }\n } else {\n if (memusage < memlimit) {\n atomic_store(&lowmem, false);\n if (verb > 0) {\n printf(\"# Low memory mode off\\n\");\n }\n }\n }\n }\n\n // Print allocations to terminal.\n if (usetrackallocs) {\n printf(\". keys=%zu, allocs=%zu, conns=%zu\\n\",\n pogocache_count(cache, 0), xallocs(), net_nconns());\n }\n\n}\n\nstatic void *ticker(void *arg) {\n (void)arg;\n while (1) {\n tick();\n sleep(1);\n }\n return 0;\n}\n\nstatic void listening(void *udata) {\n (void)udata;\n printf(\"* Network listener established\\n\");\n if (*persist) {\n if (!cleanwork(persist)) {\n // An error message has already been printed\n _Exit(0);\n }\n if (access(persist, F_OK) == 0) {\n printf(\"* Loading data from %s, please wait...\\n\", persist);\n struct load_stats stats;\n int64_t start = sys_now();\n int ret = load(persist, true, &stats);\n if (ret != 0) {\n perror(\"# Load failed\");\n _Exit(1);\n }\n double elapsed = (sys_now()-start)/1e9;\n printf(\"* Loaded %zu entries (%zu expired) (%.3f MB in %.3f secs) \"\n \"(%.0f entries/sec, %.0f MB/sec) \\n\", \n stats.ninserted, stats.nexpired,\n stats.csize/1024.0/1024.0, elapsed, \n (stats.ninserted+stats.nexpired)/elapsed, \n stats.csize/1024.0/1024.0/elapsed);\n }\n }\n atomic_store(&loaded, true);\n}\n\nstatic void yield(void *udata) {\n (void)udata;\n sched_yield();\n}\n\nint main(int argc, char *argv[]) {\n procstart = sys_now();\n\n // Intercept signals\n signal(SIGPIPE, SIG_IGN);\n signal(SIGINT, sigterm);\n signal(SIGTERM, sigterm);\n\n // Line buffer logging so pipes will stream.\n setvbuf(stdout, 0, _IOLBF, 0);\n setvbuf(stderr, 0, _IOLBF, 0);\n char guseid[17];\n memset(guseid, 0, 17);\n useid = guseid;\n sys_genuseid(useid); \n const char *maxmemorymb = 0;\n seed = sys_seed();\n verb = 0;\n usetls = false;\n 
useauth = false;\n lowmem = false;\n version = GITVERS;\n githash = GITHASH;\n\n \n\n\n if (uring_available()) {\n uring = \"yes\";\n } else {\n uring = \"no\";\n }\n\n atomic_init(&shutdownreq, 0);\n atomic_init(&flush_delay, 0);\n atomic_init(&sweep, false);\n atomic_init(®istered, false);\n\n // Parse program flags\n for (int ii = 0; ii < 2; ii++) {\n bool dryrun = ii == 0;\n for (int i = 1; i < argc; i++) {\n if (strcmp(argv[i], \"--help\") == 0) {\n showhelp(stdout);\n exit(0);\n }\n if (strcmp(argv[i], \"--version\") == 0) {\n showversion(stdout);\n exit(0);\n }\n BEGIN_FLAGS()\n BFLAG(\"-p\", port = flag)\n BFLAG(\"-h\", host = flag)\n BFLAG(\"-s\", unixsock = flag)\n TFLAG(\"-v\", verb = 1)\n TFLAG(\"-vv\", verb = 2)\n TFLAG(\"-vvv\", verb = 3)\n AFLAG(\"port\", port = flag)\n AFLAG(\"threads\", nthreads = atoi(flag))\n AFLAG(\"shards\", nshards = atoi(flag))\n AFLAG(\"backlog\", backlog = atoi(flag))\n AFLAG(\"queuesize\", queuesize = atoi(flag))\n AFLAG(\"maxmemory\", maxmemory = flag)\n AFLAG(\"evict\", evict = flag)\n AFLAG(\"reuseport\", reuseport = flag)\n AFLAG(\"uring\", uring = flag)\n AFLAG(\"tcpnodelay\", tcpnodelay = flag)\n AFLAG(\"keepalive\", keepalive = flag)\n AFLAG(\"quickack\", quickack = flag)\n AFLAG(\"trackallocs\", trackallocs = flag)\n AFLAG(\"cas\", usecas = flag)\n AFLAG(\"maxconns\", maxconns = atoi(flag))\n AFLAG(\"loadfactor\", loadfactor = atoi(flag))\n AFLAG(\"sixpack\", keysixpack = flag)\n AFLAG(\"seed\", seed = strtoull(flag, 0, 10))\n AFLAG(\"auth\", auth = flag)\n AFLAG(\"persist\", persist = flag)\n AFLAG(\"noticker\", noticker = flag)\n AFLAG(\"warmup\", warmup = flag)\n#ifndef NOOPENSSL\n // TLS flags\n AFLAG(\"tlsport\", tlsport = flag)\n AFLAG(\"tlscert\", tlscertfile = flag)\n AFLAG(\"tlscacert\", tlscacertfile = flag)\n AFLAG(\"tlskey\", tlskeyfile = flag)\n#endif\n // Hidden or alternative flags\n BFLAG(\"-t\", nthreads = atoi(flag)) // --threads=\n BFLAG(\"-m\", maxmemorymb = flag) // --maxmemory=M\n 
TFLAG(\"-M\", evict = \"no\") // --evict=no\n END_FLAGS()\n }\n }\n\n usecolor = isatty(fileno(stdout));\n\n if (strcmp(evict, \"yes\") == 0) {\n useevict = true;\n } else if (strcmp(evict, \"no\") == 0) {\n useevict = false;\n } else {\n INVALID_FLAG(\"evict\", evict);\n }\n\n bool usereuseport;\n if (strcmp(reuseport, \"yes\") == 0) {\n usereuseport = true;\n } else if (strcmp(reuseport, \"no\") == 0) {\n usereuseport = false;\n } else {\n INVALID_FLAG(\"reuseport\", reuseport);\n }\n\n if (strcmp(trackallocs, \"yes\") == 0) {\n usetrackallocs = true;\n } else if (strcmp(trackallocs, \"no\") == 0) {\n usetrackallocs = false;\n } else {\n INVALID_FLAG(\"trackallocs\", trackallocs);\n }\n\n bool usetcpnodelay;\n if (strcmp(tcpnodelay, \"yes\") == 0) {\n usetcpnodelay = true;\n } else if (strcmp(tcpnodelay, \"no\") == 0) {\n usetcpnodelay = false;\n } else {\n INVALID_FLAG(\"tcpnodelay\", tcpnodelay);\n }\n\n bool usekeepalive;\n if (strcmp(keepalive, \"yes\") == 0) {\n usekeepalive = true;\n } else if (strcmp(keepalive, \"no\") == 0) {\n usekeepalive = false;\n } else {\n INVALID_FLAG(\"keepalive\", keepalive);\n }\n\n\n bool usecasflag;\n if (strcmp(usecas, \"yes\") == 0) {\n usecasflag = true;\n } else if (strcmp(usecas, \"no\") == 0) {\n usecasflag = false;\n } else {\n INVALID_FLAG(\"usecas\", usecas);\n }\n\n if (maxconns <= 0) {\n maxconns = 1024;\n }\n\n\n#ifndef __linux__\n bool useuring = false;\n#else\n bool useuring;\n if (strcmp(uring, \"yes\") == 0) {\n useuring = true;\n } else if (strcmp(uring, \"no\") == 0) {\n useuring = false;\n } else {\n INVALID_FLAG(\"uring\", uring);\n }\n if (useuring) {\n if (!uring_available()) {\n useuring = false;\n }\n }\n#endif\n\n#ifndef __linux__\n quickack = \"no\";\n#endif\n bool usequickack;\n if (strcmp(quickack, \"yes\") == 0) {\n usequickack = true;\n } else if (strcmp(quickack, \"no\") == 0) {\n usequickack = false;\n } else {\n INVALID_FLAG(\"quickack\", quickack);\n }\n\n if (strcmp(keysixpack, \"yes\") == 0) 
{\n usesixpack = true;\n } else if (strcmp(keysixpack, \"no\") == 0) {\n usesixpack = false;\n } else {\n INVALID_FLAG(\"sixpack\", keysixpack);\n }\n\n // Threads\n if (nthreads <= 0) {\n nthreads = sys_nprocs();\n } else if (nthreads > 4096) {\n nthreads = 4096; \n }\n\n if (nshards == 0) {\n nshards = calc_nshards(nthreads);\n }\n if (nshards <= 0 || nshards > 65536) {\n nshards = 65536;\n }\n\n if (loadfactor < MINLOADFACTOR_RH) {\n loadfactor = MINLOADFACTOR_RH;\n printf(\"# loadfactor minumum set to %d\\n\", MINLOADFACTOR_RH);\n } else if (loadfactor > MAXLOADFACTOR_RH) {\n loadfactor = MAXLOADFACTOR_RH;\n printf(\"# loadfactor maximum set to %d\\n\", MAXLOADFACTOR_RH);\n }\n\n if (queuesize < 1) {\n queuesize = 1;\n printf(\"# queuesize adjusted to 1\\n\");\n } else if (queuesize > 4096) {\n queuesize = 4096;\n printf(\"# queuesize adjusted to 4096\\n\");\n }\n\n if (maxmemorymb) {\n size_t sz = strlen(maxmemorymb)+2;\n char *str = xmalloc(sz);\n snprintf(str, sz, \"%sM\", maxmemorymb);\n maxmemory = str;\n }\n\n if (!*port || strcmp(port, \"0\") == 0) {\n port = \"\";\n }\n\n if (!*tlsport || strcmp(tlsport, \"0\") == 0) {\n usetls = false;\n tlsport = \"\";\n } else {\n usetls = true;\n tls_init();\n }\n\n if (*auth) {\n useauth = true;\n }\n setmaxrlimit();\n sysmem = sys_memory();\n memlimit = calc_memlimit(maxmemory);\n\n if (memlimit == SIZE_MAX) {\n evict = \"no\";\n useevict = false;\n }\n\n struct pogocache_opts opts = {\n .yield = yield,\n .seed = seed,\n .malloc = xmalloc,\n .free = xfree,\n .nshards = nshards,\n .loadfactor = loadfactor,\n .usecas = usecasflag,\n .evicted = evicted,\n .allowshrink = true,\n .usethreadbatch = true,\n };\n // opts.yield = 0;\n\n cache = pogocache_new(&opts);\n if (!cache) {\n perror(\"pogocache_new\");\n abort();\n }\n\n // Print the program details\n printf(\"* Pogocache (pid: %d, arch: %s%s, version: %s, git: %s)\\n\",\n getpid(), sys_arch(), sizeof(uintptr_t)==4?\", mode: 32-bit\":\"\", version,\n githash);\n 
char buf0[64], buf1[64];\n char buf2[64];\n if (memlimit < SIZE_MAX) {\n snprintf(buf2, sizeof(buf2), \"%.0f%%/%s\", (double)memlimit/sysmem*100.0,\n memstr(memlimit, buf1));\n } else {\n strcpy(buf2, \"unlimited\");\n }\n printf(\"* Memory (system: %s, max: %s, evict: %s)\\n\", memstr(sysmem, buf0),\n buf2, evict);\n printf(\"* Features (verbosity: %s, sixpack: %s, cas: %s, persist: %s, \"\n \"uring: %s)\\n\",\n verb==0?\"normal\":verb==1?\"verbose\":verb==2?\"very\":\"extremely\",\n keysixpack, usecas, *persist?persist:\"none\", useuring?\"yes\":\"no\");\n char tcp_addr[256];\n snprintf(tcp_addr, sizeof(tcp_addr), \"%s:%s\", host, port);\n printf(\"* Network (port: %s, unixsocket: %s, backlog: %d, reuseport: %s, \"\n \"maxconns: %d)\\n\", *port?port:\"none\", *unixsock?unixsock:\"none\",\n backlog, reuseport, maxconns);\n printf(\"* Socket (tcpnodelay: %s, keepalive: %s, quickack: %s)\\n\",\n tcpnodelay, keepalive, quickack);\n printf(\"* Threads (threads: %d, queuesize: %d)\\n\", nthreads, queuesize);\n printf(\"* Shards (shards: %d, loadfactor: %d%%)\\n\", nshards, loadfactor);\n printf(\"* Security (auth: %s, tlsport: %s)\\n\", \n strlen(auth)>0?\"enabled\":\"disabled\", *tlsport?tlsport:\"none\");\n if (strcmp(noticker,\"yes\") == 0) {\n printf(\"# NO TICKER\\n\");\n } else {\n pthread_t th;\n int ret = pthread_create(&th, 0, ticker, 0);\n if (ret == -1) {\n perror(\"# pthread_create(ticker)\");\n exit(1);\n }\n }\n#ifdef DATASETOK\n printf(\"# DATASETOK\\n\");\n#endif\n#ifdef CMDGETNIL\n printf(\"# CMDGETNIL\\n\");\n#endif\n#ifdef CMDSETOK\n printf(\"# CMDSETOK\\n\");\n#endif\n#ifdef ENABLELOADREAD\n printf(\"# ENABLELOADREAD\\n\");\n#endif\n struct net_opts nopts = {\n .host = host,\n .port = port,\n .tlsport = tlsport,\n .unixsock = unixsock,\n .reuseport = usereuseport,\n .tcpnodelay = usetcpnodelay,\n .keepalive = usekeepalive,\n .quickack = usequickack,\n .backlog = backlog,\n .queuesize = queuesize,\n .nthreads = nthreads,\n .nowarmup = strcmp(warmup, 
\"no\") == 0,\n .nouring = !useuring,\n .listening = listening,\n .ready = ready,\n .data = evdata,\n .opened = evopened,\n .closed = evclosed,\n .maxconns = maxconns,\n };\n net_main(&nopts);\n return 0;\n}\n"], ["/pogocache/src/postgres.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit postgres.c provides the parser for the Postgres wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n\n// #define PGDEBUG\n\n#define TEXTOID 25\n#define BYTEAOID 17\n\nextern const char *version;\nextern const char *auth;\n\n#ifdef PGDEBUG\n#define dprintf printf\n#else\n#define dprintf(...)\n#endif\n\nstatic void print_packet(const char *data, size_t len) {\n dprintf(\". PACKET=%03zu [ \", len);\n for (size_t i = 0; i < len; i++) {\n printf(\"%02X \", (unsigned char)data[i]);\n }\n dprintf(\"]\\n\");\n dprintf(\". 
[\");\n for (size_t i = 0; i < len; i++) {\n unsigned char ch = data[i];\n if (ch < ' ') {\n ch = '?';\n }\n dprintf(\"%c\", ch);\n }\n dprintf(\"]\\n\");\n}\n\nstatic int32_t read_i32(const char *data) {\n return ((uint32_t)(uint8_t)data[0] << 24) |\n ((uint32_t)(uint8_t)data[1] << 16) |\n ((uint32_t)(uint8_t)data[2] << 8) |\n ((uint32_t)(uint8_t)data[3] << 0);\n}\n\nstatic void write_i32(char *data, int32_t x) {\n data[0] = (uint8_t)(((uint32_t)x) >> 24) & 0xFF;\n data[1] = (uint8_t)(((uint32_t)x) >> 16) & 0xFF;\n data[2] = (uint8_t)(((uint32_t)x) >> 8) & 0xFF;\n data[3] = (uint8_t)(((uint32_t)x) >> 0) & 0xFF;\n}\n\nstatic int16_t read_i16(const char *data) {\n return ((uint16_t)(uint8_t)data[0] << 8) |\n ((uint16_t)(uint8_t)data[1] << 0);\n}\nstatic void write_i16(char *data, int16_t x) {\n data[0] = (uint8_t)(((uint16_t)x) >> 8) & 0xFF;\n data[1] = (uint8_t)(((uint16_t)x) >> 0) & 0xFF;\n}\n\n// parse_begin is called to begin parsing a client message.\n#define parse_begin() \\\n const char *p = data; \\\n const char *e = p+len; \\\n (void)args, (void)pg, (void)e;\n\n// parse_end is called when parsing client message is complete.\n// This will check that the position of the client stream matches the\n// expected lenght provided by the client. 
\n#define parse_end() \\\n if ((size_t)(p-data) != len) { \\\n return -1; \\\n }\n\n#define parse_cstr() ({ \\\n const char *cstr = 0; \\\n const char *s = p; \\\n while (p < e) { \\\n if (*p == '\\0') { \\\n cstr = s; \\\n p++; \\\n break; \\\n } \\\n p++; \\\n } \\\n if (!cstr) { \\\n return -1; \\\n } \\\n cstr; \\\n}) \n\n#define parse_int16() ({ \\\n if (e-p < 2) { \\\n return -1; \\\n } \\\n int16_t x = read_i16(p); \\\n p += 2; \\\n x; \\\n})\n\n#define parse_byte() ({ \\\n if (e-p < 1) { \\\n return -1; \\\n } \\\n uint8_t x = *p; \\\n p += 1; \\\n x; \\\n})\n\n#define parse_int32() ({ \\\n if (e-p < 4) { \\\n return -1; \\\n } \\\n int32_t x = read_i32(p); \\\n p += 4; \\\n x; \\\n})\n\n#define parse_bytes(n) ({ \\\n if (e-p < n) { \\\n return -1; \\\n } \\\n const void *s = p; \\\n p += (n); \\\n s; \\\n})\n\nstatic void arg_append_unescape_simplestr(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n char *str2 = xmalloc(slen+1);\n for (size_t i = 0; i < str2len; i++) {\n if (str[i] == '\\'' && str[i+1] == '\\'') {\n i++;\n }\n str2[str2len++] = str[i];\n }\n args_append(args, str2, str2len, false);\n xfree(str2);\n}\n\nstatic void pg_statement_free(struct pg_statement *statement) {\n args_free(&statement->args);\n buf_clear(&statement->argtypes);\n}\n\n\nstatic void pg_portal_free(struct pg_portal *portal) {\n args_free(&portal->params);\n}\n\nstatic void statments_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n pg_statement_free(&statement);\n }\n hashmap_free(map);\n}\n\nstatic void portals_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n pg_portal_free(&portal);\n }\n hashmap_free(map);\n}\n\nstruct pg 
*pg_new(void) {\n struct pg *pg = xmalloc(sizeof(struct pg));\n memset(pg, 0, sizeof(struct pg));\n pg->oid = TEXTOID;\n return pg;\n}\n\nvoid pg_free(struct pg *pg) {\n if (!pg) {\n return;\n }\n xfree(pg->application_name);\n xfree(pg->database);\n xfree(pg->user);\n buf_clear(&pg->buf);\n statments_free(pg->statements);\n portals_free(pg->portals);\n args_free(&pg->targs);\n // args_free(&pg->xargs);\n xfree(pg->desc);\n xfree(pg);\n}\n\nstatic uint64_t pg_statement_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n return hashmap_murmur(statement.name, strlen(statement.name), seed0, seed1);\n}\n\nstatic uint64_t pg_portal_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n return hashmap_murmur(portal.name, strlen(portal.name), seed0, seed1);\n}\n\nstatic int pg_statement_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_statement stmta;\n memcpy(&stmta, a, sizeof(struct pg_statement));\n struct pg_statement stmtb;\n memcpy(&stmtb, b, sizeof(struct pg_statement));\n return strcmp(stmta.name, stmtb.name);\n}\n\nstatic int pg_portal_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_portal portala;\n memcpy(&portala, a, sizeof(struct pg_portal));\n struct pg_portal portalb;\n memcpy(&portalb, b, sizeof(struct pg_portal));\n return strcmp(portala.name, portalb.name);\n}\n\nstatic void portal_insert(struct pg *pg, struct pg_portal *portal) {\n (void)portal;\n if (!pg->portals) {\n pg->portals = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_portal), 0, 0, 0, pg_portal_hash, \n pg_portal_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->portals, portal);\n if (ptr) {\n struct pg_portal old;\n memcpy(&old, ptr, sizeof(struct pg_portal));\n pg_portal_free(&old);\n }\n}\n\nstatic void 
statement_insert(struct pg *pg, struct pg_statement *stmt) {\n if (!pg->statements) {\n pg->statements = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_statement), 0, 0, 0, pg_statement_hash, \n pg_statement_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->statements, stmt);\n if (ptr) {\n struct pg_statement old;\n memcpy(&old, ptr, sizeof(struct pg_statement));\n pg_statement_free(&old);\n }\n}\n\nstatic bool statement_get(struct pg *pg, const char *name, \n struct pg_statement *stmt)\n{\n if (!pg->statements) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_statement key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->statements, &key);\n if (!ptr) {\n return false;\n }\n memcpy(stmt, ptr, sizeof(struct pg_statement));\n return true;\n}\n\nstatic bool portal_get(struct pg *pg, const char *name, \n struct pg_portal *portal)\n{\n if (!pg->portals) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_portal key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->portals, &key);\n if (!ptr) {\n return false;\n }\n memcpy(portal, ptr, sizeof(struct pg_portal));\n return true;\n}\n\nstatic const uint8_t hextoks[256] = { \n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,\n 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n};\n\nstatic uint32_t decode_hex(const uint8_t *str) {\n return (((int)hextoks[str[0]])<<12) | (((int)hextoks[str[1]])<<8) |\n (((int)hextoks[str[2]])<<4) | (((int)hextoks[str[3]])<<0);\n}\n\nstatic bool is_surrogate(uint32_t cp) {\n return cp > 55296 && cp < 57344;\n}\n\nstatic uint32_t decode_codepoint(uint32_t cp1, uint32_t cp2) {\n return cp1 > 55296 && cp1 < 56320 && cp2 
> 56320 && cp2 < 57344 ?\n ((cp1 - 55296) << 10) | ((cp2 - 56320) + 65536) :\n 65533;\n}\n\nstatic inline int encode_codepoint(uint8_t dst[], uint32_t cp) {\n if (cp < 128) {\n dst[0] = cp;\n return 1;\n } else if (cp < 2048) {\n dst[0] = 192 | (cp >> 6);\n dst[1] = 128 | (cp & 63);\n return 2;\n } else if (cp > 1114111 || is_surrogate(cp)) {\n cp = 65533; // error codepoint\n }\n if (cp < 65536) {\n dst[0] = 224 | (cp >> 12);\n dst[1] = 128 | ((cp >> 6) & 63);\n dst[2] = 128 | (cp & 63);\n return 3;\n }\n dst[0] = 240 | (cp >> 18);\n dst[1] = 128 | ((cp >> 12) & 63);\n dst[2] = 128 | ((cp >> 6) & 63);\n dst[3] = 128 | (cp & 63);\n return 4;\n}\n\n// for_each_utf8 iterates over each UTF-8 bytes in jstr, unescaping along the\n// way. 'f' is a loop expression that will make available the 'ch' char which \n// is just a single byte in a UTF-8 series.\n// this is taken from https://github.com/tidwall/json.c\n#define for_each_utf8(jstr, len, f) { \\\n size_t nn = (len); \\\n int ch = 0; \\\n (void)ch; \\\n for (size_t ii = 0; ii < nn; ii++) { \\\n if ((jstr)[ii] != '\\\\') { \\\n ch = (jstr)[ii]; \\\n if (1) f \\\n continue; \\\n }; \\\n ii++; \\\n if (ii == nn) break; \\\n switch ((jstr)[ii]) { \\\n case '\\\\': ch = '\\\\'; break; \\\n case '/' : ch = '/'; break; \\\n case 'b' : ch = '\\b'; break; \\\n case 'f' : ch = '\\f'; break; \\\n case 'n' : ch = '\\n'; break; \\\n case 'r' : ch = '\\r'; break; \\\n case 't' : ch = '\\t'; break; \\\n case '\"' : ch = '\"'; break; \\\n case 'u' : \\\n if (ii+5 > nn) { nn = 0; continue; }; \\\n uint32_t cp = decode_hex((jstr)+ii+1); \\\n ii += 5; \\\n if (is_surrogate(cp)) { \\\n if (nn-ii >= 6 && (jstr)[ii] == '\\\\' && (jstr)[ii+1] == 'u') { \\\n cp = decode_codepoint(cp, decode_hex((jstr)+ii+2)); \\\n ii += 6; \\\n } \\\n } \\\n uint8_t _bytes[4]; \\\n int _n = encode_codepoint(_bytes, cp); \\\n for (int _j = 0; _j < _n; _j++) { \\\n ch = _bytes[_j]; \\\n if (1) f \\\n } \\\n ii--; \\\n continue; \\\n default: \\\n continue; 
\\\n }; \\\n if (1) f \\\n } \\\n}\n\nstatic void arg_append_unescape_str(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n uint8_t *str2 = xmalloc(slen+1);\n for_each_utf8((uint8_t*)str, slen, {\n str2[str2len++] = ch;\n });\n args_append(args, (char*)str2, str2len, false);\n xfree(str2);\n}\n\n// Very simple map to stores all params numbers.\nstruct pmap {\n int count;\n int nbuckets;\n uint16_t *buckets;\n uint16_t def[8];\n};\n\nstatic void pmap_init(struct pmap *map) {\n memset(map, 0, sizeof(struct pmap));\n map->nbuckets = sizeof(map->def)/sizeof(uint16_t);\n map->buckets = map->def;\n}\n\nstatic void pmap_free(struct pmap *map) {\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n}\n\nstatic void pmap_insert0(uint16_t *buckets, int nbuckets, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%nbuckets;\n while (1) {\n if (buckets[i] == 0) {\n buckets[i] = param;\n return;\n }\n i = (i+1)%nbuckets;\n }\n}\n\nstatic void pmap_grow(struct pmap *map) {\n int nbuckets2 = map->nbuckets*2;\n uint16_t *buckets2 = xmalloc(nbuckets2*sizeof(uint16_t));\n memset(buckets2, 0, nbuckets2*sizeof(uint16_t));\n for (int i = 0; i < map->nbuckets; i++) {\n if (map->buckets[i]) {\n pmap_insert0(buckets2, nbuckets2, map->buckets[i]);\n }\n }\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n map->buckets = buckets2;\n map->nbuckets = nbuckets2;\n}\n\nstatic void pmap_insert(struct pmap *map, uint16_t param) {\n assert(param != 0);\n if (map->count == (map->nbuckets>>1)+(map->nbuckets>>2)) {\n pmap_grow(map);\n }\n pmap_insert0(map->buckets, map->nbuckets, param);\n map->count++;\n}\n\nstatic bool pmap_exists(struct pmap *map, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%map->nbuckets;\n while (1) {\n if (map->buckets[i] == 0) {\n return false;\n }\n if (map->buckets[i] == param) {\n return true;\n }\n i = (i+1)%map->nbuckets;\n }\n}\n\nstatic bool parse_query_args(const char *query, struct args 
*args, \n int *nparams, struct buf *argtypes)\n{\n dprintf(\"parse_query: [%s]\\n\", query);\n struct pmap pmap;\n pmap_init(&pmap);\n\n // loop through each keyword\n while (isspace(*query)) {\n query++;\n }\n bool ok = false;\n bool esc = false;\n const char *str;\n const char *p = query;\n bool join = false;\n while (*p) {\n switch (*p) {\n case ';':\n goto break_while;\n case '\\\"':\n // identifier\n parse_errorf(\"idenifiers not allowed\");\n goto done;\n case '\\'':\n // simple string\n p++;\n str = p;\n esc = false;\n while (*p) {\n if (*p == '\\'') {\n if (*(p+1) == '\\'') {\n esc = true;\n p += 2;\n continue;\n }\n break;\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_simplestr(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n case '$':\n // dollar-quote or possible param\n if (*(p+1) >= '0' && *(p+1) <= '9') {\n char *e = 0;\n long param = strtol(p+1, &e, 10);\n if (param == 0 || param > 0xFFFF) {\n parse_errorf(\"there is no parameter $%ld\", param);\n goto done;\n }\n pmap_insert(&pmap, param);\n args_append(args, p, e-p, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'P'+join);\n join = *e && !isspace(*e);\n }\n p = e;\n continue;\n }\n // dollar-quote strings not\n parse_errorf(\"dollar-quote strings not allowed\");\n goto done;\n case 'E': case 'e':\n if (*(p+1) == '\\'') {\n // escaped string\n p += 2;\n str = p;\n while (*p) {\n if (*p == '\\\\') {\n esc = true;\n } else if (*p == '\\'') {\n size_t x = 0;\n while (*(p-x-1) == '\\\\') {\n x++;\n }\n if ((x%2)==0) {\n break;\n }\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_str(args, str, 
slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n }\n // fallthrough\n default:\n if (isspace(*p)) {\n p++;\n continue;\n }\n // keyword\n const char *keyword = p;\n while (*p && !isspace(*p)) {\n if (*p == ';' || *p == '\\'' || *p == '\\\"' || *p == '$') {\n break;\n }\n p++;\n }\n size_t keywordlen = p-keyword;\n args_append(args, keyword, keywordlen, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *p && !isspace(*p);\n }\n while (isspace(*p)) {\n p++;\n }\n continue;\n }\n p++;\n }\nbreak_while:\n while (*p) {\n if (*p != ';') {\n parse_errorf(\"unexpected characters at end of query\");\n goto done;\n }\n p++;\n }\n ok = true;\ndone:\n if (ok) {\n // check params\n for (int i = 0; i < pmap.count; i++) {\n if (!pmap_exists(&pmap, i+1)) {\n parse_errorf(\"missing parameter $%d\", i+1);\n ok = false;\n break;\n }\n }\n }\n *nparams = pmap.count;\n pmap_free(&pmap);\n if (argtypes) {\n buf_append_byte(argtypes, '\\0');\n }\n return ok;\n}\n\nstatic bool parse_cache_query_args(const char *query, struct args *args,\n int *maxparam, struct buf *argtypes)\n{\n while (isspace(*query)) {\n query++;\n }\n if (!parse_query_args(query, args, maxparam, argtypes)) {\n return false;\n }\n#ifdef PGDEBUG\n args_print(args);\n#endif\n if (argtypes) {\n dprintf(\"argtypes: [%s]\\n\", argtypes->data);\n }\n return true;\n}\n\nstatic size_t parseQ(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Query\n dprintf(\">>> Query\\n\");\n parse_begin();\n const char *query = parse_cstr();\n parse_end();\n int nparams = 0;\n bool pok = parse_cache_query_args(query, args, &nparams, 0);\n if (!pok) {\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (nparams > 0) {\n parse_seterror(\"query cannot have parameters\");\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (args->len == 0) {\n pg->empty_query = 1;\n }\n return len;\n}\n\nstatic size_t 
parseP(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Parse\n dprintf(\"<<< Parse\\n\");\n // print_packet(data, len);\n parse_begin();\n const char *stmt_name = parse_cstr();\n const char *query = parse_cstr();\n uint16_t num_param_types = parse_int16();\n // dprintf(\". Parse [%s] [%s] [%d]\\n\", stmt_name, query,\n // (int)num_param_types);\n for (uint16_t i = 0; i < num_param_types; i++) {\n int32_t param_type = parse_int32();\n (void)param_type;\n // dprintf(\". [%d]\\n\", param_type);\n }\n parse_end();\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n int nparams = 0;\n struct buf argtypes = { 0 };\n bool ok = parse_cache_query_args(query, args, &nparams, &argtypes);\n if (!ok) {\n pg->error = 1;\n args_clear(args);\n buf_clear(&argtypes);\n return len;\n }\n // copy over last statement\n struct pg_statement stmt = { 0 };\n strcpy(stmt.name, stmt_name);\n stmt.nparams = nparams;\n // copy over parsed args\n for (size_t i = 0; i < args->len; i++) {\n args_append(&stmt.args, args->bufs[i].data, args->bufs[i].len, false);\n }\n args_clear(args);\n stmt.argtypes = argtypes;\n statement_insert(pg, &stmt);\n pg->parse = 1;\n return len;\n}\n\nstatic size_t parseD(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Describe\n dprintf(\"<<< Describe\\n\");\n if (pg->describe) {\n // Already has a describe in a sequence\n pg->error = 1;\n parse_errorf(\"double describe not allowed\");\n return -1;\n }\n // print_packet(data, len);\n parse_begin();\n uint8_t type = parse_byte();\n const char *name = parse_cstr();\n parse_end();\n\n dprintf(\". 
Describe [%c] [%s]\\n\", type, name);\n if (type == 'P' || type == 'P'+1) {\n struct pg_portal portal;\n if (!portal_get(pg, name, &portal)) {\n parse_errorf(\"portal not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('T')\n // Int32 length\n // Int16 field_count\n // Field[] fields\n // all fields are unnamed text\n char field[] = { \n 0x00, // \"\\0\" (field name)\n 0x00, 0x00, 0x00, 0x00, // table_oid = 0\n 0x00, 0x00, // column_attr_no = 0\n 0x00, 0x00, 0x00, pg->oid, // type_oid = 25 (text)\n 0xFF, 0xFF, // type_size = -1\n 0xFF, 0xFF, 0xFF, 0xFF, // type_modifier = -1\n 0x00, 0x00, // format_code = 0 (text)\n };\n static_assert(sizeof(field) == 19, \"\");\n size_t size = 1+4+2+portal.params.len*sizeof(field);\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 'T';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, portal.params.len);\n p1 += 2;\n for (size_t i = 0; i < portal.params.len; i++) {\n memcpy(p1, field, sizeof(field));\n p1 += sizeof(field);\n }\n pg->desclen = size;\n return len;\n }\n\n if (type == 'S') {\n struct pg_statement stmt;\n if (!statement_get(pg, name, &stmt)) {\n parse_errorf(\"statement not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('t')\n // Int32 length\n // Int16 num_params\n // Int32[] param_type_oids\n size_t size = 1+4+2+stmt.nparams*4;\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 't';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, stmt.nparams);\n p1 += 2;\n for (int i = 0; i < stmt.nparams; i++) {\n write_i32(p1, pg->oid);\n p1 += 4;\n }\n pg->desclen = size;\n pg->describe = 1;\n return len;\n }\n parse_errorf(\"unsupported describe type '%c'\", type);\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseB(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n\n // Bind\n dprintf(\"<<< 
Bind\\n\");\n\n // print_packet(data, len);\n\n // X Byte1('B') # Bind message identifier\n // X Int32 length # Message length including self\n //\n // String portal_name # Destination portal (\"\" = unnamed)\n // String statement_name # Prepared statement name (from Parse)\n // Int16 num_format_codes # 0 = all text, 1 = one for all, or N\n // [Int16] format_codes # 0 = text, 1 = binary\n // Int16 num_parameters\n // [parameter values]\n // Int16 num_result_formats\n // [Int16] result_format_codes\n\n parse_begin();\n const char *portal_name = parse_cstr();\n const char *stmt_name = parse_cstr();\n int num_formats = parse_int16();\n for (int i = 0; i < num_formats; i++) {\n int format = parse_int16();\n if (format != 0 && format != 1) {\n parse_errorf(\"only text or binary format allowed\");\n pg->error = 1;\n return len;\n }\n }\n uint16_t num_params = parse_int16();\n args_clear(&pg->targs);\n for (int i = 0; i < num_params; i++) {\n int32_t len = parse_int32();\n if (len <= 0) {\n // Nulls are empty strings\n len = 0;\n }\n const char *b = parse_bytes(len);\n args_append(&pg->targs, b, len, false);\n }\n // ignore result formats\n uint16_t num_result_formats = parse_int16();\n for (int i = 0; i < num_result_formats; i++) {\n int result_format_codes = parse_int16();\n (void)result_format_codes;\n }\n parse_end();\n\n if (strlen(portal_name) >= PGNAMEDATALEN) {\n parse_seterror(\"portal name too large\");\n pg->error = 1;\n return len;\n }\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n struct pg_portal portal = { 0 };\n strcpy(portal.name, portal_name);\n strcpy(portal.stmt, stmt_name);\n memcpy(&portal.params, &pg->targs, sizeof(struct args));\n memset(&pg->targs, 0, sizeof(struct args));\n portal_insert(pg, &portal);\n pg->bind = 1;\n return len;\n}\n\nstatic size_t parseX(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Close\n 
dprintf(\"<<< Close\\n\");\n parse_begin();\n parse_end();\n pg->close = 1;\n return len;\n}\n\nstatic size_t parseE(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Execute\n dprintf(\"<<< Execute\\n\");\n parse_begin();\n const char *portal_name = parse_cstr();\n size_t max_rows = parse_int32();\n parse_end();\n struct pg_portal portal;\n if (!portal_get(pg, portal_name, &portal)) {\n parse_seterror(\"portal not found\");\n pg->error = 1;\n return len;\n }\n struct pg_statement stmt;\n if (!statement_get(pg, portal.stmt, &stmt)) {\n parse_seterror(\"statement not found\");\n pg->error = 1;\n return len;\n }\n if ((size_t)stmt.nparams != portal.params.len) {\n parse_seterror(\"portal params mismatch\");\n pg->error = 1;\n return len;\n }\n // ignore max_rows\n (void)max_rows;\n\n // \n args_clear(&pg->targs);\n for (size_t i = 0; i < stmt.args.len; i++) {\n const char *arg = stmt.args.bufs[i].data;\n size_t arglen = stmt.args.bufs[i].len;\n char atype = stmt.argtypes.data[i];\n dprintf(\"[%.*s] [%c]\\n\", (int)arglen, arg, atype);\n bool join = false;\n switch (atype) {\n case 'A'+1:\n atype = 'A';\n join = true;\n break;\n case 'P':\n join = false;\n break;\n case 'P'+1:\n atype = 'P';\n join = true;\n break;\n }\n if (atype == 'P') {\n if (arglen == 0 || arg[0] != '$') {\n goto internal_error;\n }\n uint64_t x;\n bool ok = parse_u64(arg+1, arglen-1, &x);\n if (!ok || x == 0 || x > 0xFFFF) {\n goto internal_error;\n }\n size_t paramidx = x-1;\n if (paramidx >= portal.params.len) {\n goto internal_error;\n }\n arg = portal.params.bufs[paramidx].data;\n arglen = portal.params.bufs[paramidx].len;\n }\n if (join) {\n assert(pg->targs.len > 0);\n buf_append(&pg->targs.bufs[pg->targs.len-1], arg, arglen);\n } else {\n args_append(&pg->targs, arg, arglen, false);\n }\n }\n\n struct args swapargs = *args;\n *args = pg->targs;\n pg->targs = swapargs;\n\n#ifdef PGDEBUG\n args_print(args);\n#endif\n\n pg->execute = 1;\n 
return len;\ninternal_error:\n parse_seterror(\"portal params internal error\");\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseS(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args;\n // Sync\n dprintf(\"<<< Sync\\n\");\n // print_packet(data, len);\n parse_begin();\n parse_end();\n pg->sync = 1;\n return len;\n}\n\nstatic size_t parsep(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // PasswordMessage\n parse_begin();\n const char *password = parse_cstr();\n parse_end();\n if (strcmp(password, auth) != 0) {\n parse_seterror(\n \"WRONGPASS invalid username-password pair or user is disabled.\");\n return -1;\n }\n pg->auth = 1;\n return len;\n}\n\nstatic ssize_t parse_message(const char *data, size_t len, struct args *args,\n struct pg *pg)\n{\n if (len < 5) {\n return 0;\n }\n int msgbyte = data[0];\n size_t msglen = read_i32(data+1);\n if (len < msglen+1) {\n return 0;\n }\n msglen -= 4;\n data += 5;\n ssize_t ret;\n switch (msgbyte) {\n case 'Q':\n ret = parseQ(data, msglen, args, pg);\n break;\n case 'P':\n ret = parseP(data, msglen, args, pg);\n break;\n case 'X':\n ret = parseX(data, msglen, args, pg);\n break;\n case 'E':\n ret = parseE(data, msglen, args, pg);\n break;\n case 'p': // lowercase\n ret = parsep(data, msglen, args, pg);\n break;\n case 'D':\n ret = parseD(data, msglen, args, pg);\n break;\n case 'B':\n ret = parseB(data, msglen, args, pg);\n break;\n case 'S':\n ret = parseS(data, msglen, args, pg);\n break;\n default:\n pg->error = 1;\n parse_errorf(\"unknown message '%c'\", msgbyte);\n ret = msglen;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+5;\n}\n\nstatic ssize_t parse_magic_ssl(const char *data, size_t len, struct pg *pg) {\n (void)data;\n // SSLRequest\n pg->ssl = 1;\n return len;\n}\n\nstatic ssize_t parse_magic_proto3(const char *data, size_t len, struct pg *pg) {\n // StartupMessage\n const char *p = (void*)data;\n const char *e = 
p+len;\n // Read parameters\n const char *user = \"\";\n const char *database = \"\";\n const char *application_name = \"\";\n const char *client_encoding = \"\";\n const char *name = 0;\n const char *s = (char*)p;\n while (p < e) {\n if (*p == '\\0') {\n if (s != p) {\n if (name) {\n if (strcmp(name, \"database\") == 0) {\n database = s;\n } else if (strcmp(name, \"application_name\") == 0) {\n application_name = s;\n } else if (strcmp(name, \"client_encoding\") == 0) {\n client_encoding = s;\n } else if (strcmp(name, \"user\") == 0) {\n user = s;\n }\n name = 0;\n } else {\n name = s;\n }\n }\n s = p+1;\n }\n p++;\n }\n // dprintf(\". database=%s, application_name=%s, client_encoding=%s, \"\n // \"user=%s\\n\", database, application_name, client_encoding, user);\n if (*client_encoding && strcmp(client_encoding, \"UTF8\") != 0) {\n printf(\"# Invalid Postgres client_encoding (%s)\\n\",\n client_encoding);\n return -1;\n }\n pg->user = xmalloc(strlen(user)+1);\n strcpy((char*)pg->user, user);\n pg->database = xmalloc(strlen(database)+1);\n strcpy((char*)pg->database, database);\n pg->application_name = xmalloc(strlen(application_name)+1);\n strcpy((char*)pg->application_name, application_name);\n pg->startup = 1;\n return p-data;\n}\n\nstatic ssize_t parse_magic_cancel(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n parse_errorf(\"cancel message unsupported\");\n return -1;\n}\n\nstatic ssize_t parse_magic(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n if (len < 4) {\n return 0;\n }\n size_t msglen = read_i32(data);\n if (msglen > 65536) {\n parse_errorf(\"message too large\");\n return -1;\n }\n if (len < msglen) {\n return 0;\n }\n if (msglen < 8) {\n parse_errorf(\"invalid message\");\n return -1;\n }\n // dprintf(\"parse_magic\\n\");\n uint32_t magic = read_i32(data+4);\n data += 8;\n msglen -= 8;\n ssize_t ret;\n switch (magic) {\n case 0x04D2162F: \n ret = parse_magic_ssl(data, msglen, 
pg);\n break;\n case 0x00030000: \n ret = parse_magic_proto3(data, msglen, pg);\n break;\n case 0xFFFF0000: \n ret = parse_magic_cancel(data, msglen, pg);\n break;\n default:\n parse_errorf(\"Protocol error: unknown magic number %08x\", magic);\n ret = -1;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+8;\n}\n\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pgptr)\n{\n (void)print_packet;\n // print_packet(data, len);\n struct pg *pg = *pgptr;\n if (!pg) {\n pg = pg_new();\n *pgptr = pg;\n }\n pg->error = 0;\n if (len == 0) {\n return 0;\n }\n if (data[0] == 0) {\n return parse_magic(data, len, pg);\n }\n return parse_message(data, len, args, pg);\n}\n\nvoid pg_write_auth(struct conn *conn, unsigned char code) {\n unsigned char bytes[] = { \n 'R', 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n}\n\nvoid pg_write_ready(struct conn *conn, unsigned char code) {\n if (!pg_execute(conn)) {\n unsigned char bytes[] = { \n 'Z', 0x0, 0x0, 0x0, 0x5, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n }\n}\n\nvoid pg_write_status(struct conn *conn, const char *key, const char *val) {\n size_t keylen = strlen(key);\n size_t vallen = strlen(val);\n int32_t size = 4+keylen+1+vallen+1;\n char *bytes = xmalloc(1+size);\n bytes[0] = 'S';\n write_i32(bytes+1, size);\n memcpy(bytes+1+4,key,keylen+1);\n memcpy(bytes+1+4+keylen+1,val,vallen+1);\n conn_write_raw(conn, bytes, 1+size);\n xfree(bytes);\n}\n\nvoid pg_write_row_desc(struct conn *conn, const char **fields, int nfields){\n size_t size = 1+4+2;\n for (int i = 0; i < nfields; i++) {\n size += strlen(fields[i])+1;\n size += 4+2+4+2+4+2;\n }\n int oid = conn_pg(conn)->oid;\n char *bytes = xmalloc(size);\n bytes[0] = 'T';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, nfields); // field_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < nfields; i++) {\n size_t fsize = 
strlen(fields[i]);\n memcpy(p, fields[i], fsize+1);\n p += fsize+1;\n write_i32(p, 0); // table_oid\n p += 4;\n write_i16(p, 0); // column_attr_number\n p += 2;\n write_i32(p, oid); // type_oid\n p += 4;\n write_i16(p, -1); // type_size\n p += 2;\n write_i32(p, -1); // type_modifier\n p += 4;\n write_i16(p, 1); // format_code\n p += 2;\n }\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_row_data(struct conn *conn, const char **cols, \n const size_t *collens, int ncols)\n{\n size_t size = 1+4+2;\n for (int i = 0; i < ncols; i++) {\n size += 4+collens[i];\n }\n char *bytes = xmalloc(size);\n bytes[0] = 'D';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, ncols); // column_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < ncols; i++) {\n write_i32(p, collens[i]); // column_length\n p += 4;\n#ifdef PGDEBUG\n printf(\" ROW >>>> len:%zu [\", collens[i]);\n binprint(cols[i], collens[i]);\n printf(\"]\\n\");\n#endif\n memcpy(p, cols[i], collens[i]); // column_data\n p += collens[i];\n }\n \n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_complete(struct conn *conn, const char *tag){\n size_t taglen = strlen(tag);\n size_t size = 1+4+taglen+1;\n char *bytes = xmalloc(size);\n bytes[0] = 'C';\n write_i32(bytes+1, size-1); // message_size\n memcpy(bytes+1+4, tag, taglen+1);\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_completef(struct conn *conn, const char *tag_format, ...){\n // initializing list pointer\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_complete(conn, tag);\n}\n\nvoid pg_write_simple_row_data_ready(struct conn *conn, const char *desc,\n const void *row, size_t len, const char *tag)\n{\n pg_write_row_desc(conn, (const char*[]){ desc }, 1);\n pg_write_row_data(conn, (const char*[]){ row }, (size_t[]){ len }, 1);\n pg_write_complete(conn, tag);\n pg_write_ready(conn, 
'I');\n}\n\nvoid pg_write_simple_row_str_ready(struct conn *conn, const char *desc,\n const char *row, const char *tag)\n{\n pg_write_simple_row_data_ready(conn, desc, row, strlen(row), tag);\n}\n\nvoid pg_write_simple_row_i64_ready(struct conn *conn, const char *desc,\n int64_t row, const char *tag)\n{\n char val[32];\n snprintf(val, sizeof(val), \"%\" PRIi64, row);\n pg_write_simple_row_str_ready(conn, desc, val, tag);\n}\n\nvoid pg_write_simple_row_str_readyf(struct conn *conn, const char *desc,\n const char *row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_str_ready(conn, desc, row, tag);\n}\n\nvoid pg_write_simple_row_i64_readyf(struct conn *conn, const char *desc,\n int64_t row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_i64_ready(conn, desc, row, tag);\n}\n\nstatic void write_auth_ok(struct conn *conn, struct pg *pg) {\n // dprintf(\">> AuthOK\\n\");\n pg_write_auth(conn, 0); // AuthOK;\n // startup message received, respond\n pg_write_status(conn, \"client_encoding\", \"UTF8\");\n pg_write_status(conn, \"server_encoding\", \"UTF8\");\n char status[128];\n snprintf(status, sizeof(status), \"%s (Pogocache)\", version);\n pg_write_status(conn, \"server_version\", status);\n pg_write_ready(conn, 'I'); // Idle;\n pg->ready = 1;\n}\n\n// Respond to various the connection states.\n// Returns true if the all responses complete or false if there was an\n// error.\nbool pg_respond(struct conn *conn, struct pg *pg) {\n if (pg->error) {\n conn_write_error(conn, parse_lasterror());\n return true;\n }\n if (pg->empty_query) {\n dprintf(\"====== pg_respond(pg->empty_query) =====\\n\");\n conn_write_raw(conn, \"I\\0\\0\\0\\4\", 5);\n conn_write_raw(conn, \"Z\\0\\0\\0\\5I\", 6);\n pg->empty_query = 0;\n return 
true;\n }\n if (pg->parse) {\n dprintf(\"====== pg_respond(pg->parse) =====\\n\");\n conn_write_raw(conn, \"1\\0\\0\\0\\4\", 5);\n pg->parse = 0;\n return true;\n }\n if (pg->bind) {\n dprintf(\"====== pg_respond(pg->bind) =====\\n\");\n conn_write_raw(conn, \"2\\0\\0\\0\\4\", 5);\n pg->bind = 0;\n return true;\n }\n if (pg->describe) {\n dprintf(\"====== pg_respond(pg->describe) =====\\n\");\n assert(pg->desc);\n conn_write_raw(conn, pg->desc, pg->desclen);\n xfree(pg->desc);\n pg->desc = 0;\n pg->desclen = 0;\n pg->describe = 0;\n return true;\n }\n if (pg->sync) {\n dprintf(\"====== pg_respond(pg->sync) =====\\n\");\n pg->execute = 0;\n pg_write_ready(conn, 'I');\n pg->sync = 0;\n return true;\n }\n if (pg->close) {\n dprintf(\"====== pg_respond(pg->close) =====\\n\");\n pg->close = 0;\n return false;\n }\n if (pg->ssl == 1) {\n if (!conn_istls(conn)) {\n conn_write_raw_cstr(conn, \"N\");\n } else {\n conn_write_raw_cstr(conn, \"Y\");\n }\n pg->ssl = 0;\n return true;\n }\n if (pg->auth == 1) {\n if (pg->startup == 0) {\n return false;\n }\n conn_setauth(conn, true);\n write_auth_ok(conn, pg);\n pg->auth = 0;\n return true;\n }\n if (pg->startup == 1) {\n if (auth && *auth) {\n pg_write_auth(conn, 3); // AuthenticationCleartextPassword;\n } else {\n write_auth_ok(conn, pg);\n pg->startup = 0;\n }\n return true;\n }\n return true;\n}\n\nvoid pg_write_error(struct conn *conn, const char *msg) {\n size_t msglen = strlen(msg);\n size_t size = 1+4;\n size += 1+5+1; // 'S' \"ERROR\" \\0\n size += 1+5+1; // 'V' \"ERROR\" \\0\n size += 1+5+1; // 'C' \"23505\" \\0\n size += 1+msglen+1; // 'M' msg \\0\n size += 1; // null-terminator\n char *bytes = xmalloc(size);\n bytes[0] = 'E';\n write_i32(bytes+1, size-1);\n char *p = bytes+1+4;\n memcpy(p, \"SERROR\", 7);\n p += 7;\n memcpy(p, \"VERROR\", 7);\n p += 7;\n memcpy(p, \"C23505\", 7);\n p += 7;\n p[0] = 'M';\n p++;\n memcpy(p, msg, msglen+1);\n p += msglen+1;\n p[0] = '\\0';\n conn_write_raw(conn, bytes, size);\n 
xfree(bytes);\n}\n\n// return true if the command need further execution, of false if this\n// operation handled it already\nbool pg_precommand(struct conn *conn, struct args *args, struct pg *pg) {\n#ifdef PGDEBUG\n printf(\"precommand: \");\n args_print(args);\n#endif\n if (args->len > 0 && args->bufs[0].len > 0) {\n char c = tolower(args->bufs[0].data[0]);\n if (c == 'b' || c == 'r' || c == 'c') {\n // silently ignore transaction commands.\n if (c == 'b' && argeq(args, 0, \"begin\")) {\n pg_write_completef(conn, \"BEGIN\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"rollback\")) {\n pg_write_completef(conn, \"ROLLBACK\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"commit\")) {\n pg_write_completef(conn, \"COMMIT\");\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n if (c == ':' && args->bufs[0].len > 1 && args->bufs[0].data[1] == ':') {\n if (argeq(args, 0, \"::bytea\") || argeq(args, 0, \"::bytes\")) {\n pg->oid = BYTEAOID;\n } else if (argeq(args, 0, \"::text\")) {\n pg->oid = TEXTOID;\n } else {\n char err[128];\n snprintf(err, sizeof(err), \"unknown type '%.*s'\", \n (int)(args->bufs[0].len-2), args->bufs[0].data+2);\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n return false;\n }\n args_remove_first(args);\n if (args->len == 0) {\n if (pg->oid == BYTEAOID) {\n pg_write_completef(conn, \"BYTEA\");\n } else {\n pg_write_completef(conn, \"TEXT\");\n }\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n }\n return true;\n}\n"], ["/pogocache/src/save.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit save.c provides an interface for saving and loading Pogocache\n// data files.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"pogocache.h\"\n#include \"buf.h\"\n#include \"util.h\"\n#include \"lz4.h\"\n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#define BLOCKSIZE 1048576\n#define COMPRESS\n\nextern struct pogocache *cache;\nextern const int verb;\n\nstruct savectx {\n pthread_t th; // work thread\n int index; // thread index\n pthread_mutex_t *lock; // write lock\n int fd; // work file descriptor\n int start; // current shard\n int count; // number of shards to process\n struct buf buf; // block buffer\n bool ok; // final ok\n int errnum; // final errno status\n struct buf dst; // compressed buffer space\n size_t nentries; // number of entried in block buffer\n};\n\nstatic int flush(struct savectx *ctx) {\n if (ctx->nentries == 0) {\n ctx->buf.len = 0;\n return 0;\n }\n // Make sure that there's enough space in the dst buffer to store the\n // header (16 bytes) and the compressed data.\n size_t bounds = LZ4_compressBound(ctx->buf.len);\n buf_ensure(&ctx->dst, 16+bounds);\n // Compress the block\n uint32_t len = LZ4_compress_default((char*)ctx->buf.data, \n (char*)ctx->dst.data+16, ctx->buf.len, bounds);\n // The block is now compressed.\n // Genreate a checksum of the compressed data.\n uint32_t crc = crc32(ctx->dst.data+16, len);\n // Write the 16 byte header\n // (0-3) 'POGO' tag\n memcpy(ctx->dst.data, \"POGO\", 4);\n // (4-7) Checksum\n write_u32(ctx->dst.data+4, crc);\n // (8-11) Len of decompressed data \n write_u32(ctx->dst.data+8, ctx->buf.len);\n // 
(12-15) Len of compressed data \n write_u32(ctx->dst.data+12, len);\n // The rest of the dst buffer contains the compressed bytes\n uint8_t *p = (uint8_t*)ctx->dst.data;\n uint8_t *end = p + len+16;\n bool ok = true;\n pthread_mutex_lock(ctx->lock);\n while (p < end) {\n ssize_t n = write(ctx->fd, p, end-p);\n if (n < 0) {\n ok = false;\n break;\n }\n p += n;\n }\n pthread_mutex_unlock(ctx->lock);\n ctx->buf.len = 0;\n ctx->nentries = 0;\n return ok ? 0 : -1;\n};\n\nstatic int save_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard;\n struct savectx *ctx = udata;\n buf_append_byte(&ctx->buf, 0); // entry type. zero=k/v string pair;\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n buf_append_uvarint(&ctx->buf, valuelen);\n buf_append(&ctx->buf, value, valuelen);\n if (expires > 0) {\n int64_t ttl = expires-time;\n assert(ttl > 0);\n buf_append_uvarint(&ctx->buf, ttl);\n } else {\n buf_append_uvarint(&ctx->buf, 0);\n }\n buf_append_uvarint(&ctx->buf, flags);\n buf_append_uvarint(&ctx->buf, cas);\n ctx->nentries++;\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void *thsave(void *arg) {\n struct savectx *ctx = arg;\n for (int i = 0; i < ctx->count; i++) {\n int shardidx = ctx->start+i;\n struct pogocache_iter_opts opts = {\n .oneshard = true,\n .oneshardidx = shardidx,\n .time = sys_now(),\n .entry = save_entry,\n .udata = ctx,\n };\n // write the unix timestamp before entries\n buf_append_uvarint(&ctx->buf, sys_unixnow());\n int status = pogocache_iter(cache, &opts);\n if (status == POGOCACHE_CANCELED) {\n goto done;\n }\n if (flush(ctx) == -1) {\n goto done;\n }\n }\n ctx->ok = true;\ndone:\n buf_clear(&ctx->buf);\n buf_clear(&ctx->dst);\n ctx->errnum = errno;\n return 0;\n}\n\nint save(const char *path, bool fast) {\n uint64_t seed = sys_seed();\n size_t psize = strlen(path)+32;\n char *workpath = 
xmalloc(psize);\n snprintf(workpath, psize, \"%s.%08x.pogocache.work\", path, \n (int)(seed%INT_MAX));\n if (verb > 1) {\n printf(\". Saving to work file %s\\n\", workpath);\n }\n int fd = open(workpath, O_RDWR|O_CREAT, S_IRUSR|S_IRGRP|S_IROTH);\n if (fd == -1) {\n return -1;\n }\n int nshards = pogocache_nshards(cache);\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n if (!fast) {\n nprocs = 1;\n }\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n struct savectx *ctxs = xmalloc(nprocs*sizeof(struct savectx));\n memset(ctxs, 0, nprocs*sizeof(struct savectx));\n bool ok = false;\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n ctx->index = i;\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->fd = fd;\n ctx->lock = &lock;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (nprocs > 1) {\n if (pthread_create(&ctx->th, 0, thsave, ctx) == -1) {\n ctx->th = 0;\n }\n }\n start += ctx->count;\n }\n // execute operations on failed threads (or fast=false)\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thsave(ctx);\n }\n }\n // wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n // check for any failures\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (!ctx->ok) {\n errno = ctx->errnum;\n goto done;\n }\n }\n // Move file work file to final path\n if (rename(workpath, path) == -1) {\n goto done;\n }\n ok = true;\ndone:\n close(fd);\n unlink(workpath);\n xfree(workpath);\n xfree(ctxs);\n return ok ? 
0 : -1;\n}\n\n// compressed block\nstruct cblock {\n struct buf cdata; // compressed data\n size_t dlen; // decompressed size\n};\n\nstruct loadctx {\n pthread_t th;\n\n // shared context\n pthread_mutex_t *lock;\n pthread_cond_t *cond;\n bool *donereading; // shared done flag\n int *nblocks; // number of blocks in queue\n struct cblock *blocks; // the block queue\n bool *failure; // a thread will set this upon error\n\n // thread status\n atomic_bool ok;\n int errnum;\n size_t ninserted;\n size_t nexpired;\n};\n\nstatic bool load_block(struct cblock *block, struct loadctx *ctx) {\n (void)ctx;\n bool ok = false;\n\n int64_t now = sys_now();\n int64_t unixnow = sys_unixnow();\n\n // decompress block\n char *ddata = xmalloc(block->dlen);\n int ret = LZ4_decompress_safe(block->cdata.data, ddata, block->cdata.len, \n block->dlen);\n if (ret < 0 || (size_t)ret != block->dlen) {\n printf(\". bad compressed block\\n\");\n goto done;\n }\n buf_clear(&block->cdata);\n uint8_t *p = (void*)ddata;\n uint8_t *e = p + block->dlen;\n\n int n;\n uint64_t x;\n // read unix time\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n printf(\". bad unix time\\n\");\n goto done;\n }\n p += n;\n\n int64_t unixtime = x;\n // printf(\". unixtime=%lld\\n\", unixtime);\n\n // Read each entry from decompressed data\n while (e > p) {\n /////////////////////\n // kind\n uint8_t kind = *(p++);\n \n if (kind != 0) {\n // only k/v strings allowed at this time.\n printf(\">> %d\\n\", kind);\n printf(\". 
unknown kind\\n\");\n goto done;\n }\n /////////////////////\n // key\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t keylen = x;\n if ((size_t)(e-p) < keylen) {\n goto done;\n }\n const uint8_t *key = p;\n p += keylen;\n /////////////////////\n // val\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t vallen = x;\n if ((size_t)(e-p) < vallen) {\n goto done;\n }\n const uint8_t *val = p;\n p += vallen;\n /////////////////////\n // ttl\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n goto done;\n }\n int64_t ttl = x;\n p += n;\n /////////////////////\n // flags\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > UINT32_MAX) {\n goto done;\n }\n uint32_t flags = x;\n p += n;\n /////////////////////\n // cas\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0) {\n goto done;\n }\n uint64_t cas = x;\n p += n;\n if (ttl > 0) {\n int64_t unixexpires = int64_add_clamp(unixtime, ttl);\n if (unixexpires < unixnow) {\n // already expired, skip this entry\n ctx->nexpired++;\n continue;\n }\n ttl = unixexpires-unixnow;\n }\n struct pogocache_store_opts opts = {\n .flags = flags,\n .time = now,\n .ttl = ttl,\n .cas = cas,\n };\n // printf(\"[%.*s]=[%.*s]\\n\", (int)keylen, key, (int)vallen, val);\n int ret = pogocache_store(cache, key, keylen, val, vallen, &opts);\n (void)ret;\n assert(ret == POGOCACHE_INSERTED || ret == POGOCACHE_REPLACED);\n ctx->ninserted++;\n }\n ok = true;\ndone:\n buf_clear(&block->cdata);\n xfree(ddata);\n if (!ok) {\n printf(\". 
bad block\\n\");\n }\n return ok;\n}\n\nstatic void *thload(void *arg) {\n struct loadctx *ctx = arg;\n pthread_mutex_lock(ctx->lock);\n while (1) {\n if (*ctx->failure) {\n break;\n }\n if (*ctx->nblocks > 0) {\n // Take a block for processing\n struct cblock block = ctx->blocks[(*ctx->nblocks)-1];\n (*ctx->nblocks)--;\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n ctx->ok = load_block(&block, ctx);\n pthread_mutex_lock(ctx->lock);\n if (!ctx->ok) {\n *ctx->failure = true;\n break;\n }\n // next block\n continue;\n }\n if (*ctx->donereading) {\n break;\n }\n pthread_cond_wait(ctx->cond, ctx->lock);\n }\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n if (!ctx->ok) {\n ctx->errnum = errno;\n }\n return 0;\n}\n\n// load data into cache from path\nint load(const char *path, bool fast, struct load_stats *stats) {\n // Use a single stream reader. Handing off blocks to threads.\n struct load_stats sstats;\n if (!stats) {\n stats = &sstats;\n }\n memset(stats, 0, sizeof(struct load_stats));\n\n int fd = open(path, O_RDONLY);\n if (fd == -1) {\n return -1;\n }\n\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n bool donereading = false;\n bool failure = false;\n\n int nprocs = fast ? 
sys_nprocs() : 1;\n struct loadctx *ctxs = xmalloc(nprocs*sizeof(struct loadctx));\n memset(ctxs, 0, nprocs*sizeof(struct loadctx));\n int nblocks = 0;\n struct cblock *blocks = xmalloc(sizeof(struct cblock)*nprocs);\n memset(blocks, 0, sizeof(struct cblock)*nprocs);\n int therrnum = 0;\n bool ok = true;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n ctx->lock = &lock;\n ctx->cond = &cond;\n ctx->donereading = &donereading;\n ctx->nblocks = &nblocks;\n ctx->failure = &failure;\n ctx->blocks = blocks;\n atomic_init(&ctx->ok, true);\n if (pthread_create(&ctx->th, 0, thload, ctx) == -1) {\n ctx->th = 0;\n ok = false;\n if (therrnum == 0) {\n therrnum = errno;\n }\n }\n }\n if (!ok) {\n // there was an error creating a thread. \n // At this point there may be some orphaned threads waiting on \n // a condition variable. \n goto shutdown_threads;\n }\n\n // Read the blocks from file, one at a time, handing putting blocks into\n // the 'blocks' queue. The running threads will pick these up and \n // process them in no specific order.\n struct buf cdata = { 0 };\n bool shortread = false;\n while (ok) {\n uint8_t head[16];\n ssize_t size = read(fd, head, 16);\n if (size <= 0) {\n if (size == -1) {\n ok = false;\n }\n break;\n }\n if (size < 16) {\n printf(\". bad head size\\n\");\n ok = false;\n break;\n }\n if (memcmp(head, \"POGO\", 4) != 0) {\n printf(\". missing 'POGO'\\n\");\n ok = false;\n break;\n }\n uint32_t crc;\n memcpy(&crc, head+4, 4);\n size_t dlen = read_u32(head+8);\n size_t clen = read_u32(head+12);\n buf_ensure(&cdata, clen);\n bool okread = true;\n size_t total = 0;\n while (total < clen) {\n ssize_t rlen = read(fd, cdata.data+total, clen-total);\n if (rlen <= 0) {\n shortread = true;\n okread = false;\n break;\n }\n total += rlen;\n }\n if (!okread) {\n if (shortread) {\n printf(\". 
shortread\\n\");\n }\n ok = false;\n break;\n }\n cdata.len = clen;\n stats->csize += clen;\n stats->dsize += dlen;\n uint32_t crc2 = crc32(cdata.data, clen);\n if (crc2 != crc) {\n printf(\". bad crc\\n\");\n ok = false;\n goto bdone;\n }\n // We have a good block. Push it into the queue\n pthread_mutex_lock(&lock);\n while (1) {\n if (failure) {\n // A major error occured, stop reading now\n ok = false;\n break;\n }\n if (nblocks == nprocs) {\n // Queue is currently filled up.\n // Wait and try again.\n pthread_cond_wait(&cond, &lock);\n continue;\n }\n // Add block to queue\n blocks[nblocks++] = (struct cblock){ \n .cdata = cdata,\n .dlen = dlen,\n };\n memset(&cdata, 0, sizeof(struct buf));\n pthread_cond_broadcast(&cond);\n break;\n }\n pthread_mutex_unlock(&lock);\n }\nbdone:\n buf_clear(&cdata);\n\n\nshutdown_threads:\n // Stop all threads\n pthread_mutex_lock(&lock);\n donereading = true;\n pthread_mutex_unlock(&lock);\n pthread_cond_broadcast(&cond);\n\n // Wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n stats->nexpired += ctx->nexpired;\n stats->ninserted += ctx->ninserted;\n }\n }\n\n // Get the current error, if any\n errno = 0;\n ok = ok && !failure;\n if (!ok) {\n errno = therrnum;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n if (!ctx->ok) {\n errno = ctx->errnum;\n break;\n }\n }\n }\n }\n\n // Free all resources.\n for (int i = 0; i < nblocks; i++) {\n buf_clear(&blocks[i].cdata);\n }\n xfree(blocks);\n xfree(ctxs);\n close(fd);\n return ok ? 
0 : -1;\n}\n\n// removes all work files and checks that the current directory is valid.\nbool cleanwork(const char *persist) {\n if (*persist == '\\0') {\n return false;\n }\n bool ok = false;\n char *path = xmalloc(strlen(persist)+1);\n strcpy(path, persist);\n char *dirpath = dirname(path);\n DIR *dir = opendir(dirpath);\n if (!dir) {\n perror(\"# opendir\");\n goto done;\n }\n struct dirent *entry;\n while ((entry = readdir(dir))) {\n if (entry->d_type != DT_REG) {\n continue;\n }\n const char *ext = \".pogocache.work\";\n if (strlen(entry->d_name) < strlen(ext) ||\n strcmp(entry->d_name+strlen(entry->d_name)-strlen(ext), ext) != 0)\n {\n continue;\n }\n size_t filepathcap = strlen(dirpath)+1+strlen(entry->d_name)+1;\n char *filepath = xmalloc(filepathcap);\n snprintf(filepath, filepathcap, \"%s/%s\", dirpath, entry->d_name);\n if (unlink(filepath) == 0) {\n printf(\"# deleted work file %s\\n\", filepath);\n } else {\n perror(\"# unlink\");\n }\n xfree(filepath);\n }\n ok = true;\ndone:\n if (dir) {\n closedir(dir);\n }\n xfree(path);\n return ok;\n}\n"], ["/pogocache/src/cmds.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit cmd.c handles all incoming client commands.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n#include \"pogocache.h\"\n#include \"stats.h\"\n\n// from main.c\nextern const uint64_t seed;\nextern const char *path;\nextern const int verb;\nextern const char *auth;\nextern const bool useauth;\nextern const char *persist;\nextern const int nthreads;\nextern const char *version;\nextern const char *githash;\nextern atomic_int_fast64_t flush_delay;\nextern atomic_bool sweep;\nextern atomic_bool lowmem;\nextern const int nshards;\nextern const int narenas;\nextern const int64_t procstart;\nextern const int maxconns;\n\nextern struct pogocache *cache;\n\nstruct set_entry_context {\n bool written;\n struct conn *conn;\n const char *cmdname;\n};\n\nstatic bool set_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)val, (void)vallen,\n (void)expires, (void)flags, (void)cas;\n struct set_entry_context *ctx = udata;\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n pg_write_row_desc(ctx->conn, (const char*[]){ \"value\" }, 1);\n pg_write_row_data(ctx->conn, (const char*[]){ val }, \n (size_t[]){ vallen }, 1);\n pg_write_completef(ctx->conn, \"%s 1\", ctx->cmdname);\n pg_write_ready(ctx->conn, 'I');\n } else {\n conn_write_bulk(ctx->conn, val, vallen);\n }\n ctx->written = true;\n 
return true;\n}\n\nstatic void execSET(struct conn *conn, const char *cmdname, \n int64_t now, const char *key,\n size_t keylen, const char *val, size_t vallen, int64_t expires, bool nx,\n bool xx, bool get, bool keepttl, uint32_t flags, uint64_t cas, bool withcas)\n{\n stat_cmd_set_incr(conn);\n struct set_entry_context ctx = { .conn = conn, .cmdname = cmdname };\n struct pogocache_store_opts opts = {\n .time = now,\n .expires = expires,\n .cas = cas,\n .flags = flags,\n .keepttl = keepttl,\n .casop = withcas,\n .nx = nx,\n .xx = xx,\n .lowmem = atomic_load_explicit(&lowmem, __ATOMIC_ACQUIRE),\n .entry = get?set_entry:0,\n .udata = get?&ctx:0,\n };\n int status = pogocache_store(cache, key, keylen, val, vallen, &opts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n return;\n }\n if (get) {\n if (!ctx.written) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n pg_write_completef(conn, \"%s 0\", cmdname);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_null(conn);\n }\n }\n return;\n }\n bool stored = status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED;\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (!stored) {\n if (status == POGOCACHE_FOUND) {\n conn_write_raw(conn, \"EXISTS\\r\\n\", 8);\n } else {\n conn_write_raw(conn, \"NOT_FOUND\\r\\n\", 12);\n }\n } else {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n }\n break;\n case PROTO_HTTP:\n if (!stored) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Stored\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"%s %d\", cmdname, stored?1:0);\n pg_write_ready(conn, 'I');\n break;\n default:\n if (!stored) {\n conn_write_null(conn);\n } else {\n conn_write_string(conn, \"OK\");\n }\n break;\n }\n}\n\nstatic int64_t expiry_seconds_time(struct conn *conn, int64_t now, \n 
int64_t expiry)\n{\n if (conn_proto(conn) == PROTO_MEMCACHE && expiry > HOUR*24*30) {\n // Consider Unix time value rather than an offset from current time.\n int64_t unix_ = sys_unixnow();\n if (expiry > unix_) {\n expiry = expiry-sys_unixnow();\n } else {\n expiry = 0;\n }\n }\n return int64_add_clamp(now, expiry);\n}\n\n// SET key value [NX | XX] [GET] [EX seconds | PX milliseconds |\n// EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL] \n// [FLAGS flags] [CAS cas] \nstatic void cmdSET(struct conn *conn, struct args *args) {\n#ifdef CMDSETOK\n // For testing the theoretical top speed of a single SET command.\n // No data is stored.\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\n#endif\n // RESP command\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n const char *val = args->bufs[2].data;\n size_t vallen = args->bufs[2].len;\n int64_t expires = 0;\n int exkind = 0;\n bool nx = false;\n bool xx = false;\n bool get = false;\n bool keepttl = false;\n bool hasex = false;\n uint32_t flags = 0;\n uint64_t cas = 0;\n bool withcas = false;\n for (size_t i = 3; i < args->len; i++) {\n if (argeq(args, i, \"ex\")) {\n exkind = 1;\n goto parse_ex;\n } else if (argeq(args, i, \"px\")) {\n exkind = 2;\n goto parse_ex;\n } else if (argeq(args, i, \"exat\")) {\n exkind = 3;\n goto parse_ex;\n } else if (argeq(args, i, \"pxat\")) {\n exkind = 4;\n parse_ex:\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, \n &expires);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n if (expires <= 0) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n // memcache allows for negative expiration\n expires = expiry_seconds_time(conn, now, 0);\n goto 
skip_exkind;\n } else {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n }\n switch (exkind) {\n case 1:\n expires = int64_mul_clamp(expires, SECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 2:\n expires = int64_mul_clamp(expires, MILLISECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 3:\n expires = int64_mul_clamp(expires, SECOND);\n break;\n case 4:\n expires = int64_mul_clamp(expires, MILLISECOND);\n break;\n }\n skip_exkind:\n hasex = true;\n } else if (argeq(args, i, \"nx\")) {\n nx = true;\n } else if (argeq(args, i, \"xx\")) {\n xx = true;\n } else if (argeq(args, i, \"get\")) {\n get = true;\n } else if (argeq(args, i, \"keepttl\")) {\n keepttl = true;\n } else if (argeq(args, i, \"flags\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n uint64_t x;\n if (!argu64(args, i, &x)) {\n goto err_syntax;\n }\n flags = x&UINT32_MAX;\n } else if (argeq(args, i, \"cas\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n if (!argu64(args, i, &cas)) {\n goto err_syntax;\n }\n withcas = true;\n } else {\n goto err_syntax;\n }\n }\n assert(expires >= 0);\n if (keepttl && hasex > 0){\n goto err_syntax;\n }\n if (xx && nx > 0){\n goto err_syntax;\n }\n execSET(conn, \"SET\", now, key, keylen, val, vallen, expires, nx, xx, get,\n keepttl, flags, cas, withcas);\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n}\n\nstatic void cmdSETEX(struct conn *conn, struct args *args) {\n if (args->len != 4) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t ex = 0;\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool ok = parse_i64(args->bufs[2].data, args->bufs[2].len, &ex);\n if (!ok || ex <= 0) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n ex = int64_mul_clamp(ex, SECOND);\n ex = int64_add_clamp(sys_now(), ex);\n const char *val = args->bufs[3].data;\n size_t 
// Context for get_entry, configuring how a loaded value is written back.
struct get_entry_context {
    struct conn *conn;  // client connection to reply on
    bool cas;           // also emit the entry's cas value
    bool mget;          // multi-key mode: include the key in each row
};

// get_entry is the pogocache load callback used by GET/MGET. It writes the
// entry's value (and optionally key/cas/flags, depending on protocol and
// context) directly to the client connection.
static void get_entry(int shard, int64_t time, const void *key, size_t keylen,
    const void *val, size_t vallen, int64_t expires, uint32_t flags,
    uint64_t cas, struct pogocache_update **update, void *udata)
{
    (void)key, (void)keylen, (void)cas;
    (void)shard, (void)time, (void)expires, (void)flags, (void)update;
    struct get_entry_context *ctx = udata;
    int x;
    uint8_t buf[24];
    size_t n;
    switch (conn_proto(ctx->conn)) {
    case PROTO_POSTGRES:;
        char casbuf[24];
        if (ctx->cas) {
            // x counts the extra "cas" column appended to the row.
            x = 1;
            n = snprintf(casbuf, sizeof(casbuf), "%" PRIu64, cas);
        } else {
            x = 0;
            casbuf[0] = '\0';
            n = 0;
        }
        if (ctx->mget) {
            pg_write_row_data(ctx->conn, (const char*[]){ key, val, casbuf }, 
                (size_t[]){ keylen, vallen, n }, 2+x);
        } else {
            pg_write_row_data(ctx->conn, (const char*[]){ val, casbuf }, 
                (size_t[]){ vallen, n }, 1+x);
        }
        break;
    case PROTO_MEMCACHE:
        // memcache text form: "VALUE <key> <flags> <bytes> [<cas>]\r\n"
        // followed by the data block.
        conn_write_raw(ctx->conn, "VALUE ", 6);
        conn_write_raw(ctx->conn, key, keylen);
        n = u64toa(flags, buf);
        conn_write_raw(ctx->conn, " ", 1);
        conn_write_raw(ctx->conn, buf, n);
        n = u64toa(vallen, buf);
        conn_write_raw(ctx->conn, " ", 1);
        conn_write_raw(ctx->conn, buf, n);
        if (ctx->cas) {
            n = u64toa(cas, buf);
            conn_write_raw(ctx->conn, " ", 1);
            conn_write_raw(ctx->conn, buf, n);
        }
        conn_write_raw(ctx->conn, "\r\n", 2);
        conn_write_raw(ctx->conn, val, vallen);
        conn_write_raw(ctx->conn, "\r\n", 2);
        break;
    case PROTO_HTTP:
        conn_write_http(ctx->conn, 200, "OK", val, vallen);
        break;
    default:
        // RESP: with cas, reply is a two-element array [cas, value].
        if (ctx->cas) {
            conn_write_array(ctx->conn, 2);
            conn_write_uint(ctx->conn, cas);
        }
        conn_write_bulk(ctx->conn, val, vallen);
    }
}

// GET key
static void cmdGET(struct conn *conn, struct args *args) {
    stat_cmd_get_incr(conn);
#ifdef CMDGETNIL
    // Benchmark build: always answer null without touching the cache.
    conn_write_null(conn);
    return;
#endif
#ifdef CMDSETOK
    // Benchmark build: answer a canned one-byte bulk value.
    conn_write_string(conn, "$1\r\nx\r\n");
    return;
#endif
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    const char *key = args->bufs[1].data;
    size_t keylen = args->bufs[1].len;
    struct get_entry_context ctx = { 
        .conn = conn
    };
    struct pogocache_load_opts opts = {
        .time = now,
        .entry = get_entry,
        .udata = &ctx,
    };
    int proto = conn_proto(conn);
    if (proto == PROTO_POSTGRES) {
        // Row description must precede any data row.
        pg_write_row_desc(conn, (const char*[]){ "value" }, 1);
    }
    // On a hit, get_entry writes the value before pogocache_load returns.
    int status = pogocache_load(cache, key, keylen, &opts);
    if (status == POGOCACHE_NOTFOUND) {
        stat_get_misses_incr(conn);
        if (proto == PROTO_HTTP) {
            conn_write_http(conn, 404, "Not Found", "Not Found\r\n" , -1);
        } else if (proto == PROTO_POSTGRES) {
            pg_write_complete(conn, "GET 0");
        } else {
            conn_write_null(conn);
        }
    } else {
        stat_get_hits_incr(conn);
        if (proto == PROTO_POSTGRES) {
            pg_write_complete(conn, "GET 1");
        }
    }
    if (proto == PROTO_POSTGRES) {
        pg_write_ready(conn, 'I');
    }
}
// Glob-style pattern matcher supporting '*' (any run of characters),
// '?' (any single character), and '\' escapes. Recursion depth is capped
// at 128, beyond which the match is reported as a failure.
// NOTE(review): an escaped '?' still behaves as a single-char wildcard
// because the shared comparison below treats '?' specially — confirm this
// is intended.
// see https://github.com/tidwall/match.c
static bool match(const char *pat, size_t plen, const char *str, size_t slen,
    int depth)
{
    if (depth == 128) {
        return false;
    }
    while (plen > 0) {
        if (pat[0] == '\\') {
            // An escape must be followed by the character it protects.
            if (plen == 1) {
                return false;
            }
            pat++;
            plen--;
        } else if (pat[0] == '*') {
            if (plen == 1) {
                // Trailing '*' matches whatever remains.
                return true;
            }
            if (pat[1] == '*') {
                // Collapse runs of consecutive stars.
                pat++;
                plen--;
                continue;
            }
            // Either the star matches nothing...
            if (match(pat+1, plen-1, str, slen, depth+1)) {
                return true;
            }
            // ...or it consumes one character of the string.
            if (slen == 0) {
                return false;
            }
            str++;
            slen--;
            continue;
        }
        // Literal (or escaped) character comparison; '?' matches any.
        if (slen == 0) {
            return false;
        }
        if (pat[0] != '?' && str[0] != pat[0]) {
            return false;
        }
        pat++;
        plen--;
        str++;
        slen--;
    }
    return slen == 0 && plen == 0;
}
// DEL key [key...]
// Deletes one or more keys and reports the count (protocol dependent).
static void cmdDEL(struct conn *conn, struct args *args) {
    if (args->len < 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    struct pogocache_delete_opts opts = {
        .time = now,
    };
    int64_t deleted = 0;
    for (size_t i = 1; i < args->len; i++) {
        const char *key = args->bufs[i].data;
        size_t keylen = args->bufs[i].len;
        int status = pogocache_delete(cache, key, keylen, &opts);
        if (status == POGOCACHE_DELETED) {
            stat_delete_hits_incr(conn);
            deleted++;
        } else {
            stat_delete_misses_incr(conn);
        }
    }
    switch (conn_proto(conn)) {
    case PROTO_MEMCACHE:
        // memcache reports only hit/miss, not a count.
        if (deleted == 0) {
            conn_write_raw_cstr(conn, "NOT_FOUND\r\n");
        } else {
            conn_write_raw_cstr(conn, "DELETED\r\n");
        }
        break;
    case PROTO_HTTP:
        if (deleted == 0) {
            conn_write_http(conn, 404, "Not Found", "Not Found\r\n", -1);
        } else {
            conn_write_http(conn, 200, "OK", "Deleted\r\n", -1);
        }
        break;
    case PROTO_POSTGRES:
        pg_write_completef(conn, "DEL %" PRIi64, deleted);
        pg_write_ready(conn, 'I');
        break;
    default:
        // RESP: integer count of deleted keys.
        conn_write_int(conn, deleted);
    }
}

// DBSIZE
// Reports the current number of entries in the cache.
static void cmdDBSIZE(struct conn *conn, struct args *args) {
    if (args->len != 1) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    struct pogocache_count_opts opts = { .time = sys_now() };
    size_t count = pogocache_count(cache, &opts);
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_simple_row_i64_ready(conn, "count", count, "DBSIZE");
    } else {
        conn_write_int(conn, (int64_t)count);
    }
}

// Per-thread work description for the parallel FLUSHALL implementation.
struct flushctx { 
    pthread_t th;   // thread handle, 0 when the thread failed to start
    int64_t time;   // snapshot of the start time
    int start;      // first shard index this worker clears
    int count;      // number of consecutive shards to clear
};
*thflush(void *arg) {\n struct flushctx *ctx = arg;\n struct pogocache_clear_opts opts = { .time = sys_now(), .oneshard = true };\n for (int i = 0; i < ctx->count; i++) {\n opts.oneshardidx = i+ctx->start;\n pogocache_clear(cache, &opts);\n }\n return 0;\n}\n\nstatic void bgflushwork(void *udata) {\n (void)udata;\n atomic_store(&flush_delay, 0);\n int64_t now = sys_now();\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n struct flushctx *ctxs = xmalloc(nprocs*sizeof(struct flushctx));\n memset(ctxs, 0, nprocs*sizeof(struct flushctx));\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->time = now;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (pthread_create(&ctx->th, 0, thflush, ctx) == -1) {\n ctx->th = 0;\n }\n start += ctx->count;\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thflush(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n}\n\nstatic void bgflushdone(struct conn *conn, void *udata) {\n const char *cmdname = udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s SYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\n// FLUSHALL [SYNC|ASYNC] [DELAY ]\nstatic void cmdFLUSHALL(struct conn *conn, struct args *args) {\n const char *cmdname = \n args_eq(args, 0, \"flush\") ? \"FLUSH\" :\n args_eq(args, 0, \"flushdb\") ? 
\"FLUSHDB\" :\n \"FLUSHALL\";\n stat_cmd_flush_incr(conn);\n bool async = false;\n int64_t delay = 0;\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"async\")) {\n async = true;\n } else if (argeq(args, i, \"sync\")) {\n async = false;\n } else if (argeq(args, i, \"delay\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, &delay);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid exptime argument\");\n return;\n }\n if (delay > 0) {\n async = true;\n }\n } else {\n goto err_syntax;\n }\n }\n if (async) {\n if (delay < 0) {\n delay = 0;\n }\n delay = int64_mul_clamp(delay, SECOND);\n delay = int64_add_clamp(delay, sys_now());\n atomic_store(&flush_delay, delay);\n // ticker will check the delay and perform the flush\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s ASYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n // Flush database is slow. cmdname is static and thread safe\n conn_bgwork(conn, bgflushwork, bgflushdone, (void*)cmdname);\n return;\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct bgsaveloadctx {\n bool ok; // true = success, false = out of disk space\n bool fast; // use all the proccesing power, otherwise one thread.\n char *path; // path to file\n bool load; // otherwise save\n};\n\nstatic void bgsaveloadwork(void *udata) {\n struct bgsaveloadctx *ctx = udata;\n int64_t start = sys_now();\n int status;\n if (ctx->load) {\n status = load(ctx->path, ctx->fast, 0);\n } else {\n status = save(ctx->path, ctx->fast);\n }\n printf(\". 
%s finished %.3f secs\\n\", ctx->load?\"load\":\"save\", \n (sys_now()-start)/1e9);\n ctx->ok = status == 0;\n}\n\nstatic void bgsaveloaddone(struct conn *conn, void *udata) {\n struct bgsaveloadctx *ctx = udata;\n if (ctx->ok) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s OK\", ctx->load?\"LOAD\":\"SAVE\");\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (ctx->load) {\n conn_write_error(conn, \"load failed\");\n } else {\n conn_write_error(conn, \"save failed\");\n }\n }\n xfree(ctx->path);\n xfree(ctx);\n}\n\n// SAVE [TO ] [FAST]\n// LOAD [FROM ] [FAST]\nstatic void cmdSAVELOAD(struct conn *conn, struct args *args) {\n bool load = argeq(args, 0, \"load\");\n bool fast = false;\n const char *path = persist;\n size_t plen = strlen(persist);\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"fast\")) {\n fast = true;\n } else if ((load && argeq(args, i, \"from\")) || \n (!load && argeq(args, i, \"to\")))\n {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n path = args->bufs[i].data;\n plen = args->bufs[i].len;\n } else {\n goto err_syntax;\n }\n }\n if (plen == 0) {\n conn_write_error(conn, \"ERR path not provided\");\n return;\n }\n struct bgsaveloadctx *ctx = xmalloc(sizeof(struct bgsaveloadctx));\n memset(ctx, 0, sizeof(struct bgsaveloadctx));\n ctx->fast = fast;\n ctx->path = xmalloc(plen+1);\n ctx->load = load;\n memcpy(ctx->path, path, plen);\n ctx->path[plen] = '\\0';\n if (!conn_bgwork(conn, bgsaveloadwork, bgsaveloaddone, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx->path);\n xfree(ctx);\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct ttlctx {\n struct conn *conn;\n bool pttl;\n};\n\nstatic void ttl_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t 
vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)key, (void)keylen, (void)val, (void)vallen, (void)flags,\n (void)cas, (void)update;\n struct ttlctx *ctx = udata;\n int64_t ttl;\n if (expires > 0) {\n ttl = expires-time;\n if (ctx->pttl) {\n ttl /= MILLISECOND;\n } else {\n ttl /= SECOND;\n }\n } else {\n ttl = -1;\n }\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n char ttlstr[24];\n size_t n = i64toa(ttl, (uint8_t*)ttlstr);\n pg_write_row_data(ctx->conn, (const char*[]){ ttlstr }, \n (size_t[]){ n }, 1);\n } else {\n conn_write_int(ctx->conn, ttl);\n }\n}\n\nstatic void cmdTTL(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool pttl = argeq(args, 0, \"pttl\");\n struct ttlctx ctx = { .conn = conn, .pttl = pttl };\n struct pogocache_load_opts opts = {\n .time = sys_now(),\n .entry = ttl_entry,\n .notouch = true,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ pttl?\"pttl\":\"ttl\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_int(conn, -2);\n }\n } else {\n stat_get_hits_incr(conn);\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %d\", pttl?\"PTTL\":\"TTL\",\n status!=POGOCACHE_NOTFOUND);\n pg_write_ready(conn, 'I');\n }\n}\n\nstatic void expire_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)expires, (void)cas;\n struct pogocache_update *ctx = udata;\n ctx->flags = flags;\n ctx->value = 
// EXPIRE key seconds
// returns 1 if success or 0 on failure. 
static void cmdEXPIRE(struct conn *conn, struct args *args) {
    if (args->len < 3) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    const char *key = args->bufs[1].data;
    size_t keylen = args->bufs[1].len;
    int64_t expires;
    if (!argi64(args, 2, &expires)) {
        conn_write_error(conn, ERR_INVALID_INTEGER);
        return;
    }
    // Convert relative seconds into an absolute clamped timestamp.
    expires = int64_mul_clamp(expires, POGOCACHE_SECOND);
    expires = int64_add_clamp(now, expires);
    // The load callback (expire_entry) re-stores the entry with the new
    // expiration via the update mechanism.
    struct pogocache_update ctx = { .expires = expires };
    struct pogocache_load_opts lopts = { 
        .time = now,
        .entry = expire_entry,
        .udata = &ctx,
    };
    int status = pogocache_load(cache, key, keylen, &lopts);
    int ret = status == POGOCACHE_FOUND;
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_completef(conn, "EXPIRE %d", ret);
        pg_write_ready(conn, 'I');
    } else {
        conn_write_int(conn, ret);
    }
}

// EXISTS key [key...]
// Checks if one or more keys exist in the cache.
// Return the number of keys that exist
static void cmdEXISTS(struct conn *conn, struct args *args) {
    if (args->len < 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    int64_t count = 0;
    // notouch: existence checks must not refresh entry recency.
    struct pogocache_load_opts opts = {
        .time = now,
        .notouch = true,
    };
    for (size_t i = 1; i < args->len; i++) {
        const char *key = args->bufs[i].data;
        size_t keylen = args->bufs[i].len;
        int status = pogocache_load(cache, key, keylen, &opts);
        if (status == POGOCACHE_FOUND) {
            count++;
        }
    }
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_simple_row_i64_ready(conn, "exists", count, "EXISTS");
    } else {
        conn_write_int(conn, count);
    }
}
sweep started\\n\");\n pogocache_sweep(cache, &swept, &kept, &opts);\n double elapsed = (sys_now()-start)/1e9;\n printf(\". sweep finished in %.2fs, (swept=%zu, kept=%zu) \\n\", elapsed, \n swept, kept);\n}\n\nstatic void sweep_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thsweep(void *arg) {\n (void)arg;\n sweep_work(0);\n return 0;\n}\n\n// SWEEP [ASYNC]\nstatic void cmdSWEEP(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thsweep, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, sweep_work, sweep_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstatic void purge_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n printf(\". purge started\\n\");\n xpurge();\n double elapsed = (sys_now()-start)/1e9;\n printf(\". 
purge finished in %.2fs\\n\", elapsed);\n}\n\nstatic void purge_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thpurge(void *arg) {\n (void)arg;\n purge_work(0);\n return 0;\n}\n\n// PURGE [ASYNC]\nstatic void cmdPURGE(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thpurge, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, purge_work, purge_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstruct populate_ctx {\n pthread_t th;\n size_t start;\n size_t count;\n char *prefix;\n size_t prefixlen;\n char *val;\n size_t vallen;\n bool randex;\n int randmin;\n int randmax;\n};\n\nstatic void *populate_entry(void *arg) {\n int64_t now = sys_now();\n struct populate_ctx *ctx = arg;\n char *key = xmalloc(ctx->prefixlen+32);\n memcpy(key, ctx->prefix, ctx->prefixlen);\n key[ctx->prefixlen++] = ':';\n for (size_t i = ctx->start; i < ctx->start+ctx->count; i++) {\n size_t n = i64toa(i, (uint8_t*)(key+ctx->prefixlen));\n size_t keylen = ctx->prefixlen+n;\n struct pogocache_store_opts opts = { \n .time = now,\n };\n if (ctx->randex) {\n int ex = (rand()%(ctx->randmax-ctx->randmin))+ctx->randmin;\n opts.ttl = ex*POGOCACHE_SECOND;\n }\n pogocache_store(cache, key, keylen, ctx->val, ctx->vallen, 
&opts);\n }\n xfree(key);\n return 0;\n}\n\n// DEBUG POPULATE [rand-ex-range]\n// DEBUG POPULATE \n// DEBUG POPULATE 1000000 test 16\n// DEBUG POPULATE 1000000 test 16 5-10\nstatic void cmdDEBUG_populate(struct conn *conn, struct args *args) {\n if (args->len != 4 && args->len != 5) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t count;\n if (!argi64(args, 1, &count) || count < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n size_t prefixlen = args->bufs[2].len;\n char *prefix = args->bufs[2].data;\n int64_t vallen;\n if (!argi64(args, 3, &vallen) || vallen < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n bool randex = false;\n int randmin = 0;\n int randmax = 0;\n if (args->len == 5) {\n size_t exlen = args->bufs[4].len;\n char *aex = args->bufs[4].data;\n char *ex = xmalloc(exlen+1);\n memcpy(ex, aex, exlen);\n ex[exlen] = '\\0';\n if (strchr(ex, '-')) {\n randmin = atoi(ex);\n randmax = atoi(strchr(ex, '-')+1);\n randex = true;\n }\n xfree(ex);\n }\n\n char *val = xmalloc(vallen);\n memset(val, 0, vallen);\n int nprocs = sys_nprocs();\n if (nprocs < 0) {\n nprocs = 1;\n }\n struct populate_ctx *ctxs = xmalloc(nprocs*sizeof(struct populate_ctx));\n memset(ctxs, 0, nprocs*sizeof(struct populate_ctx));\n size_t group = count/nprocs;\n size_t start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n ctx->start = start;\n if (i == nprocs-1) {\n ctx->count = count-start;\n } else {\n ctx->count = group;\n }\n ctx->prefix = prefix;\n ctx->prefixlen = prefixlen;\n ctx->val = val;\n ctx->vallen = vallen;\n ctx->randex = randex;\n ctx->randmin = randmin;\n ctx->randmax = randmax;\n if (pthread_create(&ctx->th, 0, populate_entry, ctx) == -1) {\n ctx->th = 0;\n }\n start += group;\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n populate_entry(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = 
&ctxs[i];\n if (ctx->th) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n xfree(val);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"DEBUG POPULATE %\" PRIi64, count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstruct dbg_detach_ctx {\n int64_t now;\n int64_t then;\n};\n\nstatic void detach_work(void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n ctx->then = sys_now();\n // printf(\". ----- DELAY START\\n\");\n // sleep(1);\n // printf(\". ----- DELAY END\\n\");\n}\n\nstatic void detach_done(struct conn *conn, void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n char buf[128];\n snprintf(buf, sizeof(buf), \"%\" PRId64 \":%\" PRId64, ctx->now, ctx->then);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_str_ready(conn, \"detach\", buf, \"DEBUG DETACH\");\n } else {\n conn_write_bulk_cstr(conn, buf);\n }\n xfree(ctx);\n}\n\n// DEBUG detach\nstatic void cmdDEBUG_detach(struct conn *conn, struct args *args) {\n (void)args;\n struct dbg_detach_ctx *ctx = xmalloc(sizeof(struct dbg_detach_ctx));\n memset(ctx, 0,sizeof(struct dbg_detach_ctx));\n ctx->now = sys_now();\n if (!conn_bgwork(conn, detach_work, detach_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx);\n }\n}\n\n// DEBUG subcommand (args...)\nstatic void cmdDEBUG(struct conn *conn, struct args *args) {\n if (args->len <= 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n // args = args[1:]\n args = &(struct args){ .bufs = args->bufs+1, .len = args->len-1 };\n if (argeq(args, 0, \"populate\")) {\n cmdDEBUG_populate(conn, args);\n } else if (argeq(args, 0, \"detach\")) {\n cmdDEBUG_detach(conn, args);\n } else {\n conn_write_error(conn, \"ERR unknown subcommand\");\n }\n}\n\nstatic void cmdECHO(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n 
pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"ECHO\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n}\n\nstatic void cmdPING(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n if (args->len == 1) {\n pg_write_simple_row_str_ready(conn, \"message\", \"PONG\", \"PING\"); \n } else {\n pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"PING\");\n }\n } else {\n if (args->len == 1) {\n conn_write_string(conn, \"PONG\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n }\n}\n\nstatic void cmdQUIT(struct conn *conn, struct args *args) {\n (void)args;\n if (conn_proto(conn) == PROTO_RESP) {\n conn_write_string(conn, \"OK\");\n }\n conn_close(conn);\n}\n\n// TOUCH key [key...]\nstatic void cmdTOUCH(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t touched = 0;\n struct pogocache_load_opts opts = { \n .time = now,\n };\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_touch_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n stat_touch_hits_incr(conn);\n touched++;\n } else {\n stat_touch_misses_incr(conn);\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"TOUCH %\" PRIi64, touched);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, touched);\n }\n}\n\nstruct get64ctx {\n bool ok;\n bool isunsigned;\n union {\n int64_t ival;\n uint64_t uval;\n };\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n};\n\nunion delta { \n uint64_t u;\n int64_t i;\n};\n\nstatic void get64(int shard, int64_t time, const void *key,\n 
size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update;\n struct get64ctx *ctx = udata;\n ctx->flags = flags;\n ctx->expires = expires;\n ctx->cas = cas;\n if (ctx->isunsigned) {\n ctx->ok = parse_u64(val, vallen, &ctx->uval);\n } else {\n ctx->ok = parse_i64(val, vallen, &ctx->ival);\n }\n}\n\nstatic void execINCRDECR(struct conn *conn, const char *key, size_t keylen, \n union delta delta, bool decr, bool isunsigned, const char *cmdname)\n{\n bool hit = false;\n bool miss = false;\n int64_t now = sys_now();\n struct get64ctx ctx = { .isunsigned = isunsigned };\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts gopts = {\n .time = now,\n .entry = get64,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &gopts);\n bool found = status == POGOCACHE_FOUND;\n if (found && !ctx.ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR cannot increment or \"\n \"decrement non-numeric value\\r\\n\");\n goto done;\n }\n goto fail_value_non_numeric;\n } else if (!found && conn_proto(conn) == PROTO_MEMCACHE) {\n miss = true;\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n goto done;\n }\n // add or subtract\n bool overflow;\n if (isunsigned) {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.uval, delta.u, &ctx.uval);\n } else {\n overflow = __builtin_add_overflow(ctx.uval, delta.u, &ctx.uval);\n }\n } else {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.ival, delta.i, &ctx.ival);\n } else {\n overflow = __builtin_add_overflow(ctx.ival, delta.i, &ctx.ival);\n }\n }\n if (overflow && conn_proto(conn) != PROTO_MEMCACHE) {\n goto fail_overflow;\n }\n // re-set the value\n char val[24];\n size_t vallen;\n if (isunsigned) {\n vallen = u64toa(ctx.uval, (uint8_t*)val);\n } else {\n vallen = i64toa(ctx.ival, (uint8_t*)val);\n 
}\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires, \n .flags = ctx.flags, \n .cas = ctx.cas,\n .udata = &ctx,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n char val[24];\n if (isunsigned) {\n snprintf(val, sizeof(val), \"%\" PRIu64, ctx.uval);\n } else {\n snprintf(val, sizeof(val), \"%\" PRIi64, ctx.ival);\n }\n pg_write_simple_row_str_readyf(conn, \"value\", val, \"%s\", cmdname);\n } else {\n if (isunsigned) {\n conn_write_uint(conn, ctx.uval);\n } else {\n conn_write_int(conn, ctx.ival);\n }\n }\n hit = true;\n goto done;\nfail_value_non_numeric:\n conn_write_error(conn, ERR_INVALID_INTEGER);\n goto done;\nfail_overflow:\n conn_write_error(conn, \"ERR increment or decrement would overflow\");\n goto done;\ndone:\n if (hit) {\n if (decr) {\n stat_decr_hits_incr(conn);\n } else {\n stat_incr_hits_incr(conn);\n }\n } else if (miss) {\n if (decr) {\n stat_decr_misses_incr(conn);\n } else {\n stat_incr_misses_incr(conn);\n }\n }\n pogocache_end(batch);\n}\n\nstatic void cmdINCRDECRBY(struct conn *conn, struct args *args, \n bool decr, const char *cmdname)\n{\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta;\n bool ok;\n if (isunsigned) {\n ok = argu64(args, 2, &delta.u);\n } else {\n ok = argi64(args, 2, &delta.i);\n }\n if (!ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR invalid numeric delta \"\n \"argument\\r\\n\");\n } else {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n }\n return;\n }\n execINCRDECR(conn, key, keylen, delta, decr, 
isunsigned, cmdname);\n}\n\n// DECRBY key num\nstatic void cmdDECRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, true, \"DECRBY\");\n}\n\n// INCRBY key num\nstatic void cmdINCRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, false, \"INCRBY\");\n}\n\n// DECR key\nstatic void cmdDECR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, true, isunsigned, \"DECR\");\n}\n\n// INCR key\nstatic void cmdINCR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, false, isunsigned, \"INCR\");\n}\n\nstruct appendctx {\n bool prepend;\n uint32_t flags;\n int64_t expires;\n const char *val;\n size_t vallen;\n char *outval;\n size_t outvallen;\n};\n\nstatic void append_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires, \n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update, (void)cas;\n struct appendctx *ctx = udata;\n ctx->expires = expires;\n ctx->flags = flags;\n ctx->outvallen = vallen+ctx->vallen;\n ctx->outval = xmalloc(ctx->outvallen);\n if (ctx->prepend) {\n memcpy(ctx->outval, ctx->val, ctx->vallen);\n memcpy(ctx->outval+ctx->vallen, val, vallen);\n } else {\n memcpy(ctx->outval, val, vallen);\n memcpy(ctx->outval+vallen, ctx->val, ctx->vallen);\n }\n}\n\n// APPEND \nstatic void cmdAPPEND(struct conn *conn, struct args *args) {\n int64_t now = 
sys_now();\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int proto = conn_proto(conn);\n bool prepend = argeq(args, 0, \"prepend\");\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n size_t vallen;\n const char *val = args_at(args, 2, &vallen);\n struct appendctx ctx = { \n .prepend = prepend,\n .val = val,\n .vallen = vallen,\n };\n size_t len;\n // Use a batch transaction for key isolation.\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = append_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &lopts);\n if (status == POGOCACHE_NOTFOUND) {\n if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"NOT_STORED\\r\\n\");\n goto done;\n }\n len = vallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n } else {\n if (ctx.outvallen > MAXARGSZ) {\n // do not let values become larger than 500MB\n xfree(ctx.outval);\n conn_write_error(conn, \"ERR value too large\");\n goto done;\n }\n len = ctx.outvallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires,\n .flags = ctx.flags,\n };\n status = pogocache_store(batch, key, keylen, ctx.outval, ctx.outvallen, \n &sopts);\n xfree(ctx.outval);\n }\n if (status == POGOCACHE_NOMEM) {\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %zu\", prepend?\"PREPEND\":\"APPEND\", len);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"STORED\\r\\n\");\n } else {\n conn_write_int(conn, len);\n }\ndone:\n pogocache_end(batch);\n}\n\nstatic void cmdPREPEND(struct conn *conn, struct args *args) {\n cmdAPPEND(conn, args);\n}\n\nstatic void cmdAUTH(struct conn *conn, struct args 
*args) {\n stat_auth_cmds_incr(0);\n if (!argeq(args, 0, \"auth\")) {\n stat_auth_errors_incr(0);\n goto noauth;\n }\n if (args->len == 3) {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n if (args->len > 3) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n if (args->len == 1) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (args->bufs[1].len != strlen(auth) || \n memcmp(auth, args->bufs[1].data, args->bufs[1].len) != 0)\n {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n conn_setauth(conn, true);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_complete(conn, \"AUTH OK\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\nnoauth:\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \n \"CLIENT_ERROR Authentication required\\r\\n\");\n } else {\n conn_write_error(conn, \"NOAUTH Authentication required.\");\n }\n return;\nwrongpass:\n conn_write_error(conn, \n \"WRONGPASS invalid username-password pair or user is disabled.\");\n}\n\nstruct stats {\n // use the args type as a list.\n struct args args;\n};\n\nstatic void stats_begin(struct stats *stats) {\n memset(stats, 0, sizeof(struct stats));\n}\n\nstatic void stats_end(struct stats *stats, struct conn *conn) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"stat\", \"value\" }, 2);\n for (size_t i = 0; i < stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n pg_write_row_data(conn, (const char*[]){ stat, val }, \n (size_t[]){ strlen(stat), strlen(val) }, 2);\n }\n pg_write_completef(conn, \"STATS %zu\", stats->args.len);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n char line[512];\n for (size_t i = 0; i < 
stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n size_t n = snprintf(line, sizeof(line), \"STAT %s\\r\\n\", stat);\n conn_write_raw(conn, line, n);\n }\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n } else {\n conn_write_array(conn, stats->args.len);\n for (size_t i = 0; i < stats->args.len; i++) {\n conn_write_array(conn, 2);\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n conn_write_bulk_cstr(conn, key);\n conn_write_bulk_cstr(conn, val);\n }\n }\n args_free(&stats->args);\n}\n\nstatic void stats_printf(struct stats *stats, const char *format, ...) {\n // initializing list pointer\n char line[512];\n va_list ap;\n va_start(ap, format);\n size_t len = vsnprintf(line, sizeof(line)-1, format, ap);\n va_end(ap);\n args_append(&stats->args, line, len+1, false); // include null-terminator\n}\n\nstatic void stats(struct conn *conn) {\n struct stats stats;\n stats_begin(&stats);\n stats_printf(&stats, \"pid %d\", getpid());\n stats_printf(&stats, \"uptime %.0f\", (sys_now()-procstart)/1e9);\n stats_printf(&stats, \"time %.0f\", sys_unixnow()/1e9);\n stats_printf(&stats, \"product %s\", \"pogocache\");\n stats_printf(&stats, \"version %s\", version);\n stats_printf(&stats, \"githash %s\", githash);\n stats_printf(&stats, \"pointer_size %zu\", sizeof(uintptr_t)*8);\n struct rusage usage;\n if (getrusage(RUSAGE_SELF, &usage) == 0) {\n stats_printf(&stats, \"rusage_user %ld.%06ld\",\n usage.ru_utime.tv_sec, usage.ru_utime.tv_usec);\n stats_printf(&stats, \"rusage_system %ld.%06ld\",\n usage.ru_stime.tv_sec, usage.ru_stime.tv_usec);\n }\n stats_printf(&stats, \"max_connections %zu\", maxconns);\n stats_printf(&stats, \"curr_connections %zu\", net_nconns());\n stats_printf(&stats, \"total_connections %zu\", net_tconns());\n stats_printf(&stats, \"rejected_connections %zu\", net_rconns());\n stats_printf(&stats, \"cmd_get %\" PRIu64, stat_cmd_get());\n 
stats_printf(&stats, \"cmd_set %\" PRIu64, stat_cmd_set());\n stats_printf(&stats, \"cmd_flush %\" PRIu64, stat_cmd_flush());\n stats_printf(&stats, \"cmd_touch %\" PRIu64, stat_cmd_touch());\n stats_printf(&stats, \"get_hits %\" PRIu64, stat_get_hits());\n stats_printf(&stats, \"get_misses %\" PRIu64, stat_get_misses());\n stats_printf(&stats, \"delete_misses %\" PRIu64, stat_delete_misses());\n stats_printf(&stats, \"delete_hits %\" PRIu64, stat_delete_hits());\n stats_printf(&stats, \"incr_misses %\" PRIu64, stat_incr_misses());\n stats_printf(&stats, \"incr_hits %\" PRIu64, stat_incr_hits());\n stats_printf(&stats, \"decr_misses %\" PRIu64, stat_decr_misses());\n stats_printf(&stats, \"decr_hits %\" PRIu64, stat_decr_hits());\n stats_printf(&stats, \"touch_hits %\" PRIu64, stat_touch_hits());\n stats_printf(&stats, \"touch_misses %\" PRIu64, stat_touch_misses());\n stats_printf(&stats, \"store_too_large %\" PRIu64, stat_store_too_large());\n stats_printf(&stats, \"store_no_memory %\" PRIu64, stat_store_no_memory());\n stats_printf(&stats, \"auth_cmds %\" PRIu64, stat_auth_cmds());\n stats_printf(&stats, \"auth_errors %\" PRIu64, stat_auth_errors());\n stats_printf(&stats, \"threads %d\", nthreads);\n struct sys_meminfo meminfo;\n sys_getmeminfo(&meminfo);\n stats_printf(&stats, \"rss %zu\", meminfo.rss);\n struct pogocache_size_opts sopts = { .entriesonly=true };\n stats_printf(&stats, \"bytes %zu\", pogocache_size(cache, &sopts));\n stats_printf(&stats, \"curr_items %zu\", pogocache_count(cache, 0));\n stats_printf(&stats, \"total_items %\" PRIu64, pogocache_total(cache, 0));\n stats_end(&stats, conn);\n}\n\nstatic void cmdSTATS(struct conn *conn, struct args *args) {\n if (args->len == 1) {\n return stats(conn);\n }\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\n// Commands hash table. 
Lazy loaded per thread.\n// Simple open addressing using case-insensitive fnv1a hashes.\nstatic int nbuckets;\nstatic struct cmd *buckets;\n\nstruct cmd {\n const char *name;\n void (*func)(struct conn *conn, struct args *args);\n};\n\nstatic struct cmd cmds[] = {\n { \"set\", cmdSET }, // pg\n { \"get\", cmdGET }, // pg\n { \"del\", cmdDEL }, // pg\n { \"mget\", cmdMGET }, // pg\n { \"mgets\", cmdMGET }, // pg cas detected\n { \"ttl\", cmdTTL }, // pg\n { \"pttl\", cmdTTL }, // pg\n { \"expire\", cmdEXPIRE }, // pg\n { \"setex\", cmdSETEX }, // pg\n { \"dbsize\", cmdDBSIZE }, // pg\n { \"quit\", cmdQUIT }, // pg\n { \"echo\", cmdECHO }, // pg\n { \"exists\", cmdEXISTS }, // pg\n { \"flushdb\", cmdFLUSHALL }, // pg\n { \"flushall\", cmdFLUSHALL }, // pg\n { \"flush\", cmdFLUSHALL }, // pg\n { \"purge\", cmdPURGE }, // pg\n { \"sweep\", cmdSWEEP }, // pg\n { \"keys\", cmdKEYS }, // pg\n { \"ping\", cmdPING }, // pg\n { \"touch\", cmdTOUCH }, // pg\n { \"debug\", cmdDEBUG }, // pg\n { \"incrby\", cmdINCRBY }, // pg\n { \"decrby\", cmdDECRBY }, // pg\n { \"incr\", cmdINCR }, // pg\n { \"decr\", cmdDECR }, // pg\n { \"uincrby\", cmdINCRBY }, // pg unsigned detected in signed operation\n { \"udecrby\", cmdDECRBY }, // pg unsigned detected in signed operation\n { \"uincr\", cmdINCR }, // pg unsigned detected in signed operation\n { \"udecr\", cmdDECR }, // pg unsigned detected in signed operation\n { \"append\", cmdAPPEND }, // pg\n { \"prepend\", cmdPREPEND }, // pg\n { \"auth\", cmdAUTH }, // pg\n { \"save\", cmdSAVELOAD }, // pg\n { \"load\", cmdSAVELOAD }, // pg\n { \"stats\", cmdSTATS }, // pg memcache style stats\n};\n\nstatic void build_commands_table(void) {\n static __thread bool buckets_ready = false;\n static pthread_mutex_t cmd_build_lock = PTHREAD_MUTEX_INITIALIZER;\n static bool built = false;\n if (!buckets_ready) {\n pthread_mutex_lock(&cmd_build_lock);\n if (!built) {\n int ncmds = sizeof(cmds)/sizeof(struct cmd);\n int n = ncmds*8;\n nbuckets = 2;\n 
while (nbuckets < n) {\n nbuckets *= 2;\n }\n buckets = xmalloc(nbuckets*sizeof(struct cmd));\n memset(buckets, 0, nbuckets*sizeof(struct cmd));\n uint64_t hash;\n for (int i = 0; i < ncmds; i++) {\n hash = fnv1a_case(cmds[i].name, strlen(cmds[i].name));\n for (int j = 0; j < nbuckets; j++) {\n int k = (j+hash)&(nbuckets-1);\n if (!buckets[k].name) {\n buckets[k] = cmds[i];\n break;\n }\n }\n }\n built = true;\n }\n pthread_mutex_unlock(&cmd_build_lock);\n buckets_ready = true;\n }\n}\n\nstatic struct cmd *get_cmd(const char *name, size_t namelen) {\n build_commands_table();\n uint32_t hash = fnv1a_case(name, namelen);\n int j = hash&(nbuckets-1);\n while (1) {\n if (!buckets[j].name) {\n return 0;\n }\n if (argeq_bytes(name, namelen, buckets[j].name)) {\n return &buckets[j];\n }\n j++;\n }\n}\n\nvoid evcommand(struct conn *conn, struct args *args) {\n if (useauth && !conn_auth(conn)) {\n if (conn_proto(conn) == PROTO_HTTP) {\n // Let HTTP traffic through.\n // The request has already been authorized in http.c\n } else {\n cmdAUTH(conn, args);\n return;\n }\n }\n if (verb > 1) {\n if (!argeq(args, 0, \"auth\")) {\n args_print(args);\n }\n }\n struct cmd *cmd = get_cmd(args->bufs[0].data, args->bufs[0].len);\n if (cmd) {\n cmd->func(conn, args);\n } else {\n if (verb > 0) {\n printf(\"# Unknown command '%.*s'\\n\", (int)args->bufs[0].len,\n args->bufs[0].data);\n }\n char errmsg[128];\n snprintf(errmsg, sizeof(errmsg), \"ERR unknown command '%.*s'\", \n (int)args->bufs[0].len, args->bufs[0].data);\n conn_write_error(conn, errmsg);\n }\n}\n"], ["/pogocache/src/conn.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit conn.c are interface functions for a network connection.\n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"args.h\"\n#include \"cmds.h\"\n#include \"xmalloc.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"helppage.h\"\n\n#define MAXPACKETSZ 1048576 // Maximum read packet size\n\nstruct conn {\n struct net_conn *conn5; // originating connection\n struct buf packet; // current incoming packet\n int proto; // connection protocol (memcache, http, etc)\n bool auth; // user is authorized\n bool noreply; // only for memcache\n bool keepalive; // only for http\n int httpvers; // only for http\n struct args args; // command args, if any\n struct pg *pg; // postgres context, only if proto is postgres\n};\n\nbool conn_istls(struct conn *conn) {\n return net_conn_istls(conn->conn5);\n}\n\nint conn_proto(struct conn *conn) {\n return conn->proto;\n}\n\nbool conn_auth(struct conn *conn) {\n return conn->auth;\n}\n\nvoid conn_setauth(struct conn *conn, bool ok) {\n conn->auth = ok;\n}\n\nbool conn_isclosed(struct conn *conn) {\n return net_conn_isclosed(conn->conn5);\n}\n\nvoid conn_close(struct conn *conn) {\n net_conn_close(conn->conn5);\n}\n\nvoid evopened(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = xmalloc(sizeof(struct conn));\n memset(conn, 0, sizeof(struct conn));\n conn->conn5 = conn5;\n net_conn_setudata(conn5, conn);\n}\n\nvoid evclosed(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n buf_clear(&conn->packet);\n args_free(&conn->args);\n pg_free(conn->pg);\n xfree(conn);\n}\n\n// network data handler\n// The evlen may be zero when returning from a bgwork 
routine, while having\n// existing data in the connection packet.\nvoid evdata(struct net_conn *conn5, const void *evdata, size_t evlen,\n void *udata)\n{\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n if (conn_isclosed(conn)) {\n goto close;\n }\n#ifdef DATASETOK\n if (evlen == 14 && memcmp(evdata, \"*1\\r\\n$4\\r\\nPING\\r\\n\", 14) == 0) {\n conn_write_raw(conn, \"+PONG\\r\\n\", 7);\n } else if (evlen == 13 && memcmp(evdata, \"*2\\r\\n$3\\r\\nGET\\r\\n\", 13) == 0) {\n conn_write_raw(conn, \"$1\\r\\nx\\r\\n\", 7);\n } else {\n conn_write_raw(conn, \"+OK\\r\\n\", 5);\n }\n return;\n#endif\n char *data;\n size_t len;\n bool copied;\n if (conn->packet.len == 0) {\n data = (char*)evdata;\n len = evlen;\n copied = false;\n } else {\n buf_append(&conn->packet, evdata, evlen);\n len = conn->packet.len;\n data = conn->packet.data;\n copied = true;\n }\n while (len > 0 && !conn_isclosed(conn)) {\n // Parse the command\n ssize_t n = parse_command(data, len, &conn->args, &conn->proto, \n &conn->noreply, &conn->httpvers, &conn->keepalive, &conn->pg);\n if (n == 0) {\n // Not enough data provided yet.\n break;\n } else if (n == -1) {\n // Protocol error occurred.\n conn_write_error(conn, parse_lasterror());\n if (conn->proto == PROTO_MEMCACHE) {\n // Memcache doesn't close, but we'll need to know the last\n // character position to continue and revert back to it so\n // we can attempt to continue to the next command.\n n = parse_lastmc_n();\n } else {\n // Close on protocol error\n conn_close(conn);\n break;\n }\n } else if (conn->args.len == 0) {\n // There were no command arguments provided.\n if (conn->proto == PROTO_POSTGRES) {\n if (!pg_respond(conn, conn->pg)) {\n // close connection\n conn_close(conn);\n break;\n }\n } else if (conn->proto == PROTO_MEMCACHE) {\n // Memcache simply returns a nondescript error.\n conn_write_error(conn, \"ERROR\");\n } else if (conn->proto == PROTO_HTTP) {\n // HTTP must always return arguments.\n 
assert(!\"PROTO_HTTP\");\n } else if (conn->proto == PROTO_RESP) {\n // RESP just continues until it gets args.\n }\n } else if (conn->proto == PROTO_POSTGRES && !conn->pg->ready) {\n // This should not have been reached. The client did not \n // send a startup message\n conn_close(conn);\n break;\n } else if (conn->proto != PROTO_POSTGRES || \n pg_precommand(conn, &conn->args, conn->pg))\n {\n evcommand(conn, &conn->args);\n }\n len -= n;\n data += n;\n if (net_conn_bgworking(conn->conn5)) {\n // BGWORK(0)\n break;\n }\n if (conn->proto == PROTO_HTTP) {\n conn_close(conn);\n }\n }\n if (conn_isclosed(conn)) {\n goto close;\n }\n if (len == 0) {\n if (copied) {\n if (conn->packet.cap > MAXPACKETSZ) {\n buf_clear(&conn->packet);\n }\n conn->packet.len = 0;\n }\n } else {\n if (copied) {\n memmove(conn->packet.data, data, len);\n conn->packet.len = len;\n } else {\n buf_append(&conn->packet, data, len);\n }\n }\n return;\nclose:\n conn_close(conn);\n}\n\nstruct bgworkctx {\n struct conn *conn;\n void *udata;\n void(*work)(void *udata);\n void(*done)(struct conn *conn, void *udata);\n};\n\nstatic void work5(void *udata) {\n struct bgworkctx *ctx = udata;\n ctx->work(ctx->udata);\n}\n\nstatic void done5(struct net_conn *conn, void *udata) {\n (void)conn;\n struct bgworkctx *ctx = udata;\n ctx->done(ctx->conn, ctx->udata);\n xfree(ctx);\n}\n\n// conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool conn_bgwork(struct conn *conn, void(*work)(void *udata), \n void(*done)(struct conn *conn, void *udata), void *udata)\n{\n struct bgworkctx *ctx = xmalloc(sizeof(struct bgworkctx));\n ctx->conn = conn;\n ctx->udata = udata;\n ctx->work = work;\n ctx->done = done;\n if (!net_conn_bgwork(conn->conn5, work5, done5, ctx)) {\n xfree(ctx);\n return false;\n }\n return true;\n}\n\nstatic void writeln(struct conn *conn, char ch, const void *data, ssize_t len) {\n if 
(len < 0) {\n len = strlen(data);\n }\n net_conn_out_ensure(conn->conn5, 3+len);\n net_conn_out_write_byte_nocheck(conn->conn5, ch);\n size_t mark = net_conn_out_len(conn->conn5);\n net_conn_out_write_nocheck(conn->conn5, data, len);\n net_conn_out_write_byte_nocheck(conn->conn5, '\\r');\n net_conn_out_write_byte_nocheck(conn->conn5, '\\n');\n uint8_t *out = (uint8_t*)net_conn_out(conn->conn5);\n for (ssize_t i = mark; i < len; i++) {\n if (out[i] < ' ') {\n out[i] = ' ';\n }\n }\n}\n\nstatic void write_error(struct conn *conn, const char *err, bool server) {\n if (conn->proto == PROTO_MEMCACHE) {\n if (strstr(err, \"ERR \") == err) {\n // convert to client or server error\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n if (server) {\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err+4); \n } else {\n snprintf(err2, err2sz, \"CLIENT_ERROR %s\\r\\n\", err+4); \n }\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n if (server) {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else if (strstr(err, \"CLIENT_ERROR \") == err || \n strstr(err, \"CLIENT_ERROR \") == err)\n {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"%s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n conn_write_raw(conn, \"ERROR\\r\\n\", 7);\n }\n }\n } else if (conn->proto == PROTO_POSTGRES) {\n if (strstr(err, \"ERR \") == err) {\n err = err+4;\n }\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n } else if (conn->proto == PROTO_HTTP) {\n if (strstr(err, \"ERR \") == err) {\n err += 4;\n }\n if (strcmp(err, \"Show Help HTML\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_HTML, -1);\n } else if (strcmp(err, \"Show Help TEXT\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_TEXT, -1);\n } else if (strcmp(err, 
\"Method Not Allowed\") == 0) {\n conn_write_http(conn, 405, \"Method Not Allowed\", \n \"Method Not Allowed\\r\\n\", -1);\n } else if (strcmp(err, \"Unauthorized\") == 0) {\n conn_write_http(conn, 401, \"Unauthorized\", \n \"Unauthorized\\r\\n\", -1);\n } else if (strcmp(err, \"Bad Request\") == 0) {\n conn_write_http(conn, 400, \"Bad Request\", \n \"Bad Request\\r\\n\", -1);\n } else {\n size_t sz = strlen(err)+32;\n char *err2 = xmalloc(sz);\n snprintf(err2, sz, \"ERR %s\\r\\n\", err);\n conn_write_http(conn, 500, \"Internal Server Error\", \n err2, -1);\n xfree(err2);\n }\n } else {\n writeln(conn, '-', err, -1);\n }\n}\n\nvoid conn_write_error(struct conn *conn, const char *err) {\n bool server = false;\n if (strcmp(err, ERR_OUT_OF_MEMORY) == 0) {\n server = true;\n }\n write_error(conn, err, server);\n}\n\nvoid conn_write_string(struct conn *conn, const char *cstr) {\n writeln(conn, '+', cstr, -1);\n}\n\nvoid conn_write_null(struct conn *conn) {\n net_conn_out_write(conn->conn5, \"$-1\\r\\n\", 5);\n}\n\nvoid resp_write_bulk(struct buf *buf, const void *data, size_t len) {\n uint8_t str[32];\n size_t n = u64toa(len, str);\n buf_append_byte(buf, '$');\n buf_append(buf, str, n);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n buf_append(buf, data, len);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n}\n\nvoid conn_write_bulk(struct conn *conn, const void *data, size_t len) {\n net_conn_out_ensure(conn->conn5, 32+len);\n size_t olen = net_conn_out_len(conn->conn5);\n uint8_t *base = (uint8_t*)net_conn_out(conn->conn5)+olen;\n uint8_t *p = base;\n *(p++) = '$';\n p += u64toa(len, p);\n *(p++) = '\\r';\n *(p++) = '\\n';\n memcpy(p, data, len);\n p += len;\n *(p++) = '\\r';\n *(p++) = '\\n';\n net_conn_out_setlen(conn->conn5, olen + (p-base));\n}\n\nvoid conn_write_raw(struct conn *conn, const void *data, size_t len) {\n net_conn_out_write(conn->conn5, data, len);\n}\n\nvoid conn_write_http(struct conn *conn, int code, const char 
*status,\n const void *body, ssize_t bodylen)\n{\n if (bodylen == -1) {\n if (!body) {\n body = status;\n }\n bodylen = strlen(body);\n }\n char resp[512];\n size_t n = snprintf(resp, sizeof(resp), \n \"HTTP/1.1 %d %s\\r\\n\"\n \"Content-Length: %zu\\r\\n\"\n \"Connection: Close\\r\\n\"\n \"\\r\\n\",\n code, status, bodylen);\n conn_write_raw(conn, resp, n);\n if (bodylen > 0) {\n conn_write_raw(conn, body, bodylen);\n }\n}\n\nvoid conn_write_array(struct conn *conn, size_t count) {\n uint8_t str[24];\n size_t n = u64toa(count, str);\n writeln(conn, '*', str, n);\n}\n\nvoid conn_write_uint(struct conn *conn, uint64_t value) {\n uint8_t buf[24];\n size_t n = u64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, '+', buf, n); // the '+' is needed for unsigned int\n }\n}\n\nvoid conn_write_int(struct conn *conn, int64_t value) {\n uint8_t buf[24];\n size_t n = i64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, ':', buf, n);\n }\n}\n\nvoid conn_write_raw_cstr(struct conn *conn, const char *cstr) {\n conn_write_raw(conn, cstr, strlen(cstr));\n}\n\nvoid conn_write_bulk_cstr(struct conn *conn, const char *cstr) {\n conn_write_bulk(conn, cstr, strlen(cstr));\n}\n\nvoid stat_cmd_get_incr(struct conn *conn) {\n net_stat_cmd_get_incr(conn->conn5);\n}\n\nvoid stat_cmd_set_incr(struct conn *conn) {\n net_stat_cmd_set_incr(conn->conn5);\n}\n\nvoid stat_get_hits_incr(struct conn *conn) {\n net_stat_get_hits_incr(conn->conn5);\n}\n\nvoid stat_get_misses_incr(struct conn *conn) {\n net_stat_get_misses_incr(conn->conn5);\n}\n\nbool pg_execute(struct conn *conn) {\n return conn->pg->execute;\n}\n\nstruct pg *conn_pg(struct conn *conn) {\n return conn->pg;\n}\n"], ["/pogocache/src/pogocache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit pogocache.c is the primary caching engine library, which is designed\n// to be standalone and embeddable.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"pogocache.h\"\n\n#define MINLOADFACTOR_RH 55 // 55%\n#define MAXLOADFACTOR_RH 95 // 95%\n#define DEFLOADFACTOR 75 // 75%\n#define SHRINKAT 10 // 10%\n#define DEFSHARDS 4096 // default number of shards\n#define INITCAP 64 // intial number of buckets per shard\n\n// #define DBGCHECKENTRY\n// #define EVICTONITER\n// #define HALFSECONDTIME\n// #define NO48BITPTRS\n\n#if INTPTR_MAX == INT64_MAX\n#ifdef NO48BITPTRS\n#define PTRSIZE 8\n#else\n#define PTRSIZE 6\n#endif\n#elif INTPTR_MAX == INT32_MAX\n#define PTRSIZE 4\n#else\n#error Unknown pointer size\n#endif\n\nstatic struct pogocache_count_opts defcountopts = { 0 };\nstatic struct pogocache_total_opts deftotalopts = { 0 };\nstatic struct pogocache_size_opts defsizeopts = { 0 };\nstatic struct pogocache_sweep_opts defsweepopts = { 0 };\nstatic struct pogocache_clear_opts defclearopts = { 0 };\nstatic struct pogocache_store_opts defstoreopts = { 0 };\nstatic struct pogocache_load_opts defloadopts = { 0 };\nstatic struct pogocache_delete_opts defdeleteopts = { 0 };\nstatic struct pogocache_iter_opts defiteropts = { 0 };\nstatic struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// returns monotonic nanoseconds of the CPU clock.\nstatic int64_t gettime(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif 
defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// returns offset of system clock since first call in thread.\nstatic int64_t getnow(void) {\n return gettime();\n}\n\n// https://github.com/tidwall/th64\nstatic uint64_t th64(const void *data, size_t len, uint64_t seed) {\n uint8_t*p=(uint8_t*)data,*e=p+len;\n uint64_t r=0x14020a57acced8b7,x,h=seed;\n while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;\n while(p>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);\n}\n\n// Load a pointer from an unaligned memory.\nstatic void *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\n// Store a pointer into unaligned memory.\nstatic void store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nstatic uint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\n// Sixpack compression algorithm\n// 
// Sixpack compression algorithm
// - Converts a simple 8-bit string into 6-bit string.
// - Intended to be used on small strings that only use characters commonly
//   used for keys in KV data stores.
// - Allows the following 64 item character set:
//       -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy
//   Note that the characters "QZz" are not included.
// - Sortable and comparable using memcmp.
static char tosix[256] = {
    0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, // 0-15
    0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, // 16-31
    0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  2,  0, // 32-47
    3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,  0,  0,  0,  0,  0, // 48-63
    0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79
   29,  0, 30, 31, 32, 33, 34, 35, 36, 37,  0,  0,  0,  0,  0, 38, // 80-95
    0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111
   54, 55, 56, 57, 58, 59, 60, 61, 62, 63,  0,  0,  0,  0,  0,  0, // 112-127
};

static char fromsix[] = {
    0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
    'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',
    'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
    'r', 's', 't', 'u', 'v', 'w', 'x', 'y'
};

// Packing layout, four source characters per three output bytes:
// 0: [000000..]                     bitpos: 0
// 1: [00000011][1111....]           bitpos: 6
// 2: [00000011][11112222][22......] bitpos: 12
// 3: [00000011][11112222][22333333] bitpos: 18

// Sixpack data
// Fills the data in dst and returns the number of bytes filled.
// Returns 0 if the input is not sixpackable.
// The dst array must be large enough to hold the packed value.
static int sixpack(const char *data, int len, char dst[]) {
    const unsigned char *src = (const unsigned char*)data;
    int n = 0;
    for (int i = 0; i < len; i++) {
        int six = tosix[src[i]];
        if (six == 0) {
            // Character outside the 64-item set: not packable.
            return 0;
        }
        switch (i & 3) {
        case 0:
            dst[n++] = six << 2;
            break;
        case 1:
            dst[n-1] |= six >> 4;
            dst[n++] = six << 4;
            break;
        case 2:
            dst[n-1] |= six >> 2;
            dst[n++] = six << 6;
            break;
        default:
            dst[n-1] |= six;
            break;
        }
    }
    return n;
}

// (Un)sixpack data.
// Fills the data in dst and returns the len of original data.
// The data must be sixpacked and len must be > 0.
// The dst array must be large enough to hold the unpacked value.
static int unsixpack(const char *data, int len, char dst[]) {
    const unsigned char *src = (const unsigned char*)data;
    int n = 0;
    int phase = 0;
    for (int i = 0; i < len; i++) {
        switch (phase) {
        case 0:
            dst[n++] = fromsix[src[i]>>2];
            phase = 1;
            break;
        case 1:
            dst[n++] = fromsix[((src[i-1]<<4)|(src[i]>>4))&63];
            phase = 2;
            break;
        default:
            dst[n++] = fromsix[((src[i-1]<<2)|(src[i]>>6))&63];
            dst[n++] = fromsix[src[i]&63];
            phase = 0;
            break;
        }
    }
    // A zero in the last slot is padding from a non-multiple-of-four
    // original length; trim it.
    if (n > 0 && dst[n-1] == 0) {
        n--;
    }
    return n;
}

// Safely adds two int64_t values, clamping on overflow.
static int64_t int64_add_clamp(int64_t a, int64_t b) {
    if ((a ^ b) >= 0) { // Opposite signs can't overflow
        if (a > 0) {
            if (b > INT64_MAX - a) {
                return INT64_MAX;
            }
        } else if (b < INT64_MIN - a) {
            return INT64_MIN;
        }
    }
    return a + b;
}

/// https://github.com/tidwall/varint.c
// Writes x as a base-128 varint and returns the number of bytes written.
static int varint_write_u64(void *data, uint64_t x) {
    uint8_t *out = data;
    if (x < 128) {
        *out = x;
        return 1;
    }
    int n = 0;
    do {
        out[n++] = (uint8_t)x | 128;
        x >>= 7;
    } while (x >= 128);
    out[n++] = (uint8_t)x;
    return n;
}

// Reads a base-128 varint into x. Returns the number of bytes consumed,
// 0 when the buffer ended early, or -1 when the varint is malformed.
static int varint_read_u64(const void *data, size_t len, uint64_t *x) {
    const uint8_t *in = data;
    if (len > 0 && in[0] < 128) {
        // Fast path: single-byte varint.
        *x = in[0];
        return 1;
    }
    *x = 0;
    size_t i = 0;
    while (i < len && i < 10) {
        uint64_t b = in[i];
        *x |= (b & 127) << (7 * i);
        if (b < 128) {
            return i + 1;
        }
        i++;
    }
    return i == 10 ? -1 : 0;
}

#ifdef HALFSECONDTIME
typedef uint32_t etime_t;
#else
typedef int64_t etime_t;
#endif

// Mostly a copy of the pogocache_opts, but used internally.
// See the opts_to_ctx function for translation.
struct pgctx {
    void *(*malloc)(size_t);
    void (*free)(void*);
    size_t (*malloc_size)(void*);
    void (*yield)(void *udata);
    void (*evicted)(int shard, int reason, int64_t time, const void *key,
        size_t keylen, const void *val, size_t vallen, int64_t expires,
        uint32_t flags, uint64_t cas, void *udata);
    void *udata;
    bool usecas;
    bool nosixpack;
    bool noevict;
    bool allowshrink;
    bool usethreadbatch;
    int nshards;
    double loadfactor;
    double shrinkfactor;
    uint64_t seed;
};
The optionality depends on\n// header bit flags.\nstruct entry;\n\n// Returns the sizeof the entry struct, which takes up no space at all.\n// This would be like doing a sizeof(struct entry), if entry had a structure.\nstatic size_t entry_struct_size(void) {\n return 0;\n}\n\n// Returns the data portion of the entry, which is the entire allocation.\nstatic const uint8_t *entry_data(const struct entry *entry) {\n return (uint8_t*)entry;\n}\n\nstatic int64_t entry_expires(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n int64_t x = 0;\n if ((hdr>>0)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\nstatic int64_t entry_time(struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n etime_t etime;\n memcpy(&etime, p+1, sizeof(etime_t));\n#ifdef HALFSECONDTIME\n int64_t time = (int64_t)etime * INT64_C(500000000);\n#else \n int64_t time = etime;\n#endif \n return time;\n}\n\nstatic void entry_settime(struct entry *entry, int64_t time) {\n const uint8_t *p = entry_data(entry);\n#ifdef HALFSECONDTIME\n // Eviction time is stored as half seconds.\n etime_t etime = time / INT64_C(500000000);\n etime = etime > UINT32_MAX ? UINT32_MAX : etime;\n#else\n etime_t etime = time;\n#endif\n memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));\n}\n\nstatic int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,\n int64_t cleartime)\n{\n return etime < cleartime ? POGOCACHE_REASON_CLEARED :\n expires > 0 && expires <= now ? 
POGOCACHE_REASON_EXPIRED :\n 0;\n}\n\nstatic int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {\n int64_t etime = entry_time(entry);\n int64_t expires = entry_expires(entry);\n return entry_alive_exp(expires, etime, now, cleartime);\n}\n\nstatic uint64_t entry_cas(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n uint64_t x = 0;\n if ((hdr>>2)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\n// returns the key. If using sixpack make sure to copy the result asap.\nstatic const char *entry_key(const struct entry *entry, size_t *keylen_out,\n char buf[128])\n{\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n if ((hdr>>3)&1) {\n keylen = unsixpack(key, (int)keylen, buf);\n key = buf;\n }\n *keylen_out = keylen;\n return key;\n}\n\n// returns the raw key. 
sixpack will be returned in it's raw format\nstatic const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n *keylen_out = keylen;\n return key;\n}\n\nstatic bool entry_sixpacked(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p);\n return (hdr>>3)&1;\n}\n\nstatic size_t entry_extract(const struct entry *entry, const char **key,\n size_t *keylen, char buf[128], const char **val, size_t *vallen, \n int64_t *expires, uint32_t *flags, uint64_t *cas,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n if (expires) {\n memcpy(expires, p, 8);\n }\n p += 8; // expires\n } else {\n if (expires) {\n *expires = 0;\n }\n }\n if ((hdr>>1)&1) {\n if (flags) {\n memcpy(flags, p, 4);\n }\n p += 4; // flags\n } else {\n if (flags) {\n *flags = 0;\n }\n }\n if (ctx->usecas) {\n if (cas) {\n memcpy(cas, p, 8);\n }\n p += 8; // cas\n } else {\n if (cas) {\n *cas = 0;\n }\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n if (key) {\n *key = (char*)p;\n *keylen = x;\n if ((hdr>>3)&1) {\n *keylen = unsixpack(*key, (int)*keylen, buf);\n *key = buf;\n }\n }\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n if (val) {\n *val = (char*)p;\n *vallen = x;\n }\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\nstatic size_t entry_memsize(const struct entry *entry,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p 
+= 4; // flags\n }\n if (ctx->usecas) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\n// The 'cas' param should always be set to zero unless loading from disk. \n// Setting to zero will set a new unique cas to the entry.\nstatic struct entry *entry_new(const char *key, size_t keylen, const char *val,\n size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,\n struct pgctx *ctx)\n{\n bool usesixpack = !ctx->nosixpack;\n#ifdef DBGCHECKENTRY\n // printf(\"entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, \"\n // \"expires=%\" PRId64 \", flags=%\" PRId32 \", cas=%\" PRIu64 \", \"\n // \"usesixpack=%d\\n\", (int)keylen, key, keylen, (int)vallen, key, vallen,\n // expires, flags, cas, usesixpack);\n int64_t oexpires = expires;\n uint32_t oflags = flags;\n uint64_t ocas = cas;\n const char *okey = key;\n size_t okeylen = keylen;\n const char *oval = val;\n size_t ovallen = vallen;\n#endif\n uint8_t hdr = 0;\n uint8_t keylenbuf[10];\n uint8_t vallenbuf[10];\n int nexplen, nflagslen, ncaslen, nkeylen, nvallen;\n if (expires > 0) {\n hdr |= 1;\n nexplen = 8;\n } else {\n nexplen = 0;\n }\n if (flags > 0) {\n hdr |= 2;\n nflagslen = 4;\n } else {\n nflagslen = 0;\n }\n if (ctx->usecas) {\n hdr |= 4;\n ncaslen = 8;\n } else {\n ncaslen = 0;\n }\n char buf[128];\n if (usesixpack && keylen <= 128) {\n size_t len = sixpack(key, keylen, buf);\n if (len > 0) {\n hdr |= 8;\n keylen = len;\n key = buf;\n }\n }\n nkeylen = varint_write_u64(keylenbuf, keylen);\n nvallen = varint_write_u64(vallenbuf, vallen);\n struct entry *entry_out = 0;\n size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+\n ncaslen+nkeylen+keylen+nvallen+vallen;\n // printf(\"malloc=%p size=%zu, ctx=%p\\n\", ctx->malloc, size, ctx);\n void *mem = ctx->malloc(size);\n struct entry *entry = mem;\n if (!entry) {\n return 
0;\n }\n uint8_t *p = (void*)entry_data(entry);\n *(p++) = hdr;\n memset(p, 0, sizeof(etime_t));\n p += sizeof(etime_t); // time\n if (nexplen > 0) {\n memcpy(p, &expires, nexplen);\n p += nexplen;\n }\n if (nflagslen > 0) {\n memcpy(p, &flags, nflagslen);\n p += nflagslen;\n }\n if (ncaslen > 0) {\n memcpy(p, &cas, ncaslen);\n p += ncaslen;\n }\n memcpy(p, keylenbuf, nkeylen);\n p += nkeylen;\n memcpy(p, key, keylen);\n p += keylen;\n memcpy(p, vallenbuf, nvallen);\n p += nvallen;\n memcpy(p, val, vallen);\n p += vallen;\n entry_out = entry;\n#ifdef DBGCHECKENTRY\n // check the key\n const char *key2, *val2;\n size_t keylen2, vallen2;\n int64_t expires2;\n uint32_t flags2;\n uint64_t cas2;\n char buf1[256];\n entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,\n &flags2, &cas2, ctx);\n assert(expires2 == oexpires);\n assert(flags2 == oflags);\n assert(cas2 == ocas);\n assert(keylen2 == okeylen);\n assert(memcmp(key2, okey, okeylen) == 0);\n assert(vallen2 == ovallen);\n assert(memcmp(val2, oval, ovallen) == 0);\n#endif\n return entry_out;\n}\n\nstatic void entry_free(struct entry *entry, struct pgctx *ctx) {\n ctx->free(entry);\n}\n\nstatic int entry_compare(const struct entry *a, const struct entry *b) {\n size_t akeylen, bkeylen;\n char buf1[256], buf2[256];\n const char *akey;\n const char *bkey;\n if (entry_sixpacked(a) == entry_sixpacked(b)) {\n akey = entry_rawkey(a, &akeylen);\n bkey = entry_rawkey(b, &bkeylen);\n } else {\n akey = entry_key(a, &akeylen, buf1);\n bkey = entry_key(b, &bkeylen, buf2);\n }\n size_t size = akeylen < bkeylen ? akeylen : bkeylen;\n int cmp = memcmp(akey, bkey, size);\n if (cmp == 0) {\n cmp = akeylen < bkeylen ? 
-1 : akeylen > bkeylen;\n }\n return cmp;\n}\n\n#ifndef HASHSIZE\n#define HASHSIZE 3\n#endif\n#if HASHSIZE < 1 || HASHSIZE > 4\n#error bad hash size\n#endif\n\nstruct bucket {\n uint8_t entry[PTRSIZE]; // 48-bit pointer\n uint8_t hash[HASHSIZE]; // 24-bit hash\n uint8_t dib; // distance to bucket\n};\n\nstatic_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, \"bad bucket size\");\n\nstruct map {\n int cap; // initial capacity\n int nbuckets; // number of buckets\n int count; // current entry count\n int mask; // bit mask for \n int growat;\n int shrinkat;\n struct bucket *buckets;\n uint64_t total; // current entry count\n size_t entsize; // memory size of all entries\n \n};\n\nstruct shard {\n atomic_uintptr_t lock; // spinlock (batch pointer)\n uint64_t cas; // compare and store value\n int64_t cleartime; // last clear time\n int clearcount; // number of items cleared\n struct map map; // robinhood hashmap\n // for batch linked list only\n struct shard *next;\n};\n\nstatic void lock_init(struct shard *shard) {\n atomic_init(&shard->lock, 0);\n}\n\nstruct batch {\n struct pogocache *cache; // associated cache.\n struct shard *shard; // first locked shard\n int64_t time; // timestamp\n};\n\nstruct pogocache {\n bool isbatch; \n union {\n struct pgctx ctx;\n struct batch batch;\n };\n struct shard shards[];\n};\n\nstatic struct entry *get_entry(struct bucket *bucket) {\n return load_ptr(bucket->entry);\n}\n\nstatic void set_entry(struct bucket *bucket, struct entry *entry) {\n store_ptr(bucket->entry, entry);\n}\n\n#if HASHSIZE == 1\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFF;\n}\nstatic void write_hash(uint8_t data[1], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[1]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n return hash;\n}\n#elif HASHSIZE == 2\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFF;\n}\nstatic void write_hash(uint8_t data[2], uint32_t hash) {\n data[0] = 
(hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[2]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n return hash;\n}\n#elif HASHSIZE == 3\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFFFF;\n}\nstatic void write_hash(uint8_t data[3], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[3]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n return hash;\n}\n#else \nstatic uint32_t clip_hash(uint32_t hash) {\n return hash;\n}\nstatic void write_hash(uint8_t data[4], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n data[3] = (hash>>24)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[4]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n hash |= ((uint64_t)data[3])<<24;\n return hash;\n}\n#endif\n\nstatic uint32_t get_hash(struct bucket *bucket) {\n return read_hash(bucket->hash);\n}\n\nstatic void set_hash(struct bucket *bucket, uint32_t hash) {\n write_hash(bucket->hash, hash);\n}\n\nstatic uint8_t get_dib(struct bucket *bucket) {\n return bucket->dib;\n}\n\nstatic void set_dib(struct bucket *bucket, uint8_t dib) {\n bucket->dib = dib;\n}\n\nstatic bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {\n map->cap = cap;\n map->nbuckets = cap;\n map->count = 0;\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * ctx->loadfactor;\n map->shrinkat = map->nbuckets * ctx->shrinkfactor;\n size_t size = sizeof(struct bucket)*map->nbuckets;\n map->buckets = ctx->malloc(size);\n if (!map->buckets) {\n // nomem\n memset(map, 0, sizeof(struct map));\n return false;\n }\n memset(map->buckets, 0, size);\n return true;\n}\n\nstatic bool resize(struct map *map, 
size_t new_cap, struct pgctx *ctx) {\n struct map map2;\n if (!map_init(&map2, new_cap, ctx)) {\n return false;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket ebkt = map->buckets[i];\n if (get_dib(&ebkt)) {\n set_dib(&ebkt, 1);\n size_t j = get_hash(&ebkt) & map2.mask;\n while (1) {\n if (get_dib(&map2.buckets[j]) == 0) {\n map2.buckets[j] = ebkt;\n break;\n }\n if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {\n struct bucket tmp = map2.buckets[j];\n map2.buckets[j] = ebkt;\n ebkt = tmp;\n }\n j = (j + 1) & map2.mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n }\n }\n int org_cap = map->cap;\n int org_count = map->count;\n ctx->free(map->buckets);\n memcpy(map, &map2, sizeof(struct map));\n map->cap = org_cap;\n map->count = org_count;\n return true;\n}\n\nstatic bool map_insert(struct map *map, struct entry *entry, uint32_t hash,\n struct entry **old, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*2, ctx)) {\n *old = 0;\n return false;\n }\n }\n map->entsize += entry_memsize(entry, ctx);\n struct bucket ebkt;\n set_entry(&ebkt, entry);\n set_hash(&ebkt, hash);\n set_dib(&ebkt, 1);\n size_t i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n // new entry\n map->buckets[i] = ebkt;\n map->count++;\n map->total++;\n *old = 0;\n return true;\n }\n if (get_hash(&ebkt) == get_hash(&map->buckets[i]) && \n entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)\n {\n // replaced\n *old = get_entry(&map->buckets[i]);\n map->entsize -= entry_memsize(*old, ctx);\n set_entry(&map->buckets[i], get_entry(&ebkt));\n return true;\n }\n if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {\n struct bucket tmp = map->buckets[i];\n map->buckets[i] = ebkt;\n ebkt = tmp;\n }\n i = (i + 1) & map->mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n}\n\nstatic bool bucket_eq(struct map *map, size_t i, const char *key,\n size_t keylen, uint32_t hash)\n{\n if 
(get_hash(&map->buckets[i]) != hash) {\n return false;\n }\n size_t keylen2;\n char buf[128];\n const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);\n return keylen == keylen2 && memcmp(key, key2, keylen) == 0;\n}\n\n// Returns the bucket index for key, or -1 if not found.\nstatic int map_get_bucket(struct map *map, const char *key, size_t keylen,\n uint32_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while (1) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n return -1;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return i;\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic struct entry *map_get_entry(struct map *map, const char *key,\n size_t keylen, uint32_t hash, int *bkt_idx_out)\n{\n int i = map_get_bucket(map, key, keylen, hash);\n *bkt_idx_out = i;\n return i >= 0 ? get_entry(&map->buckets[i]) : 0;\n}\n\n// This deletes entry from bucket and adjusts the dibs buckets to right, if\n// needed.\nstatic void delbkt(struct map *map, size_t i) {\n set_dib(&map->buckets[i], 0);\n while (1) {\n size_t h = i;\n i = (i + 1) & map->mask;\n if (get_dib(&map->buckets[i]) <= 1) {\n set_dib(&map->buckets[h], 0);\n break;\n }\n map->buckets[h] = map->buckets[i];\n set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);\n }\n map->count--;\n}\n\nstatic bool needsshrink(struct map *map, struct pgctx *ctx) {\n return ctx->allowshrink && map->nbuckets > map->cap && \n map->count <= map->shrinkat;\n}\n\n// Try to shrink the hashmap. 
If needed, this will allocate a new hashmap that\n// has fewer buckets and move all existing entries into the smaller map.\n// The 'multi' param is a hint that multi entries may have been deleted, such\n// as with the iter or clear operations.\n// If the resize fails due to an allocation error then the existing hashmap\n// will be retained.\nstatic void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {\n if (!needsshrink(map, ctx)) {\n return;\n }\n int cap;\n if (multi) {\n // Determine how many buckets are needed to store all entries.\n cap = map->cap;\n int growat = cap * ctx->loadfactor;\n while (map->count >= growat) {\n cap *= 2;\n growat = cap * ctx->loadfactor;\n }\n } else {\n // Just half the buckets\n cap = map->nbuckets / 2;\n }\n resize(map, cap, ctx);\n}\n\n// delete an entry at bucket position. not called directly\nstatic struct entry *delentry_at_bkt(struct map *map, size_t i, \n struct pgctx *ctx)\n{\n struct entry *old = get_entry(&map->buckets[i]);\n assert(old);\n map->entsize -= entry_memsize(old, ctx);\n delbkt(map, i);\n return old;\n}\n\nstatic struct entry *map_delete(struct map *map, const char *key,\n size_t keylen, uint32_t hash, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n int i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n return 0;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return delentry_at_bkt(map, i, ctx);\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic size_t evict_entry(struct shard *shard, int shardidx, \n struct entry *entry, int64_t now, int reason, struct pgctx *ctx)\n{\n char buf[128];\n size_t keylen;\n const char *key = entry_key(entry, &keylen, buf);\n uint32_t hash = th64(key, keylen, ctx->seed);\n struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(del == entry); (void)del;\n if (ctx->evicted) {\n // Notify user that an entry was evicted.\n const char *val;\n size_t vallen;\n int64_t expires = 0;\n uint32_t flags = 0;\n uint64_t cas = 0;\n 
entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val,\n vallen, expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n size_t size = entry_memsize(entry, ctx);\n entry_free(entry, ctx);\n return size;\n}\n\n// evict an entry using the 2-random algorithm.\n// Pick two random entries and delete the one with the oldest access time.\n// Do not evict the entry if it matches the provided hash.\nstatic void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,\n int64_t now, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n struct map *map = &shard->map;\n struct entry *entries[2];\n int count = 0;\n for (int i = 1; i < map->nbuckets && count < 2; i++) {\n size_t j = (i+hash)&(map->nbuckets-1);\n struct bucket *bkt = &map->buckets[j];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry has expired. 
Evict this one instead.\n evict_entry(shard, shardidx, entry, now, reason, ctx);\n return;\n }\n if (get_hash(bkt) == hash) {\n continue;\n }\n entries[count++] = entry;\n }\n int choose;\n if (count == 1) {\n choose = 0;\n } else if (count == 2) {\n // We now have two candidates.\n if (entry_time(entries[0]) < entry_time(entries[1])) {\n choose = 0;\n } else {\n choose = 1;\n }\n } else {\n return;\n }\n evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,\n ctx);\n}\n\nstatic void shard_deinit(struct shard *shard, struct pgctx *ctx) {\n struct map *map = &shard->map;\n if (!map->buckets) {\n return;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n entry_free(entry, ctx);\n }\n ctx->free(map->buckets);\n}\n\nstatic bool shard_init(struct shard *shard, struct pgctx *ctx) {\n memset(shard, 0, sizeof(struct shard));\n lock_init(shard);\n shard->cas = 1;\n if (!map_init(&shard->map, INITCAP, ctx)) {\n // nomem\n shard_deinit(shard, ctx);\n return false;\n }\n return true;\n}\n\n/// Free all cache and shard hashmap allocations.\n/// This does not access the value data in any of the entries. If it is needed\n/// for the further cleanup at an entry value level, then use the\n/// pogocache_iter to perform the cleanup on each entry before calling this\n/// operation.\n/// Also this is not threadsafe. 
Make sure that other threads are not\n/// currently using the cache concurrently nor after this function is called.\nvoid pogocache_free(struct pogocache *cache) {\n if (!cache) {\n return;\n }\n struct pgctx *ctx = &cache->ctx;\n for (int i = 0; i < cache->ctx.nshards; i++) {\n shard_deinit(&cache->shards[i], ctx);\n }\n cache->ctx.free(cache);\n}\n\nstatic void opts_to_ctx(int nshards, struct pogocache_opts *opts,\n struct pgctx *ctx)\n{\n ctx->nshards = nshards;\n int loadfactor = 0;\n if (opts) {\n ctx->yield = opts->yield;\n ctx->evicted = opts->evicted;\n ctx->udata = opts->udata;\n ctx->usecas = opts->usecas;\n ctx->nosixpack = opts->nosixpack;\n ctx->noevict = opts->noevict;\n ctx->seed = opts->seed;\n loadfactor = opts->loadfactor;\n ctx->allowshrink = opts->allowshrink;\n ctx->usethreadbatch = opts->usethreadbatch;\n }\n // make loadfactor a floating point\n loadfactor = loadfactor == 0 ? DEFLOADFACTOR :\n loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :\n loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :\n loadfactor;\n ctx->loadfactor = ((double)loadfactor/100.0);\n ctx->shrinkfactor = ((double)SHRINKAT/100.0);\n}\n\nstatic struct pogocache_opts newdefopts = { 0 };\n\n/// Returns a new cache or null if there is not enough memory available.\n/// See 'pogocache_opts' for all options.\nstruct pogocache *pogocache_new(struct pogocache_opts *opts) {\n if (!opts) {\n opts = &newdefopts;\n }\n void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;\n void (*_free)(void*) = opts->free ? opts->free : free;\n int shards = !opts || opts->nshards <= 0 ? 
DEFSHARDS : opts->nshards;\n size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);\n struct pogocache *cache = _malloc(size);\n if (!cache) {\n return 0;\n }\n memset(cache, 0, sizeof(struct pogocache));\n struct pgctx *ctx = &cache->ctx;\n opts_to_ctx(shards, opts, ctx);\n ctx->malloc = _malloc;\n ctx->free = _free;\n for (int i = 0; i < ctx->nshards; i++) {\n if (!shard_init(&cache->shards[i], ctx)) {\n // nomem\n pogocache_free(cache);\n return 0;\n }\n }\n return cache;\n}\n\nstatic int shard_index(struct pogocache *cache, uint64_t hash) {\n return (hash>>32)%cache->ctx.nshards;\n}\n\nstatic struct shard *shard_get(struct pogocache *cache, int index) {\n return &cache->shards[index];\n}\n\n/// Returns a timestamp.\nint64_t pogocache_now(void) {\n return getnow();\n}\n\nstatic __thread struct pogocache thbatch;\n\nstruct pogocache *pogocache_begin(struct pogocache *cache) {\n struct pogocache *batch;\n if (cache->ctx.usethreadbatch) {\n batch = &thbatch;\n } else {\n batch = cache->ctx.malloc(sizeof(struct pogocache));\n if (!batch) {\n return 0;\n }\n }\n batch->isbatch = true;\n batch->batch.cache = cache;\n batch->batch.shard = 0;\n batch->batch.time = 0;\n return batch;\n}\n\nvoid pogocache_end(struct pogocache *batch) {\n assert(batch->isbatch);\n struct shard *shard = batch->batch.shard;\n while (shard) {\n struct shard *next = shard->next;\n shard->next = 0;\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);\n shard = next;\n }\n if (!batch->batch.cache->ctx.usethreadbatch) {\n batch->batch.cache->ctx.free(batch);\n }\n}\n\nstatic void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {\n if (batch) {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n shard->next = batch->shard;\n batch->shard = shard;\n break;\n }\n if (val == (uintptr_t)(void*)batch) {\n break;\n }\n if (ctx->yield) {\n 
ctx->yield(ctx->udata);\n }\n }\n } else {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n break;\n }\n if (ctx->yield) {\n ctx->yield(ctx->udata);\n }\n }\n }\n}\n\nstatic bool acquire_for_scan(int shardidx, struct shard **shard_out, \n struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *shard_out = shard;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// acquire a lock for the key\nstatic bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,\n struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n uint64_t fhash = th64(key, keylen, cache->ctx.seed);\n int shardidx = shard_index(cache, fhash);\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *hash_out = fhash;\n *shard_out = shard;\n *shardidx_out = shardidx;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// Acquire a lock on the shard for key and execute the provided operation.\n#define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \\\n int shardidx; \\\n uint32_t hash; \\\n struct shard *shard; \\\n bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \\\n &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)shardidx, (void)hash, (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\n// Acquire a lock on the shard at index and execute the 
provided operation.\n#define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \\\n struct shard *shard; \\\n bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\nstatic int loadop(const void *key, size_t keylen, \n struct pogocache_load_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defloadopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n // Get the entry bucket index for the entry with key.\n int bidx = map_get_bucket(&shard->map, key, keylen, hash);\n if (bidx == -1) {\n return POGOCACHE_NOTFOUND;\n }\n // Extract the bucket, entry, and values.\n struct bucket *bkt = &shard->map.buckets[bidx];\n struct entry *entry = get_entry(bkt);\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. 
Evict the entry and clear the bucket.\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(entry, ctx);\n delbkt(&shard->map, bidx);\n return POGOCACHE_NOTFOUND;\n }\n if (!opts->notouch) {\n entry_settime(entry, now);\n }\n if (opts->entry) {\n struct pogocache_update *update = 0;\n opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,\n cas, &update, opts->udata);\n if (update) {\n // User wants to update the entry.\n shard->cas++;\n struct entry *entry2 = entry_new(key, keylen, update->value,\n update->valuelen, update->expires, update->flags, shard->cas, \n ctx);\n if (!entry2) {\n return POGOCACHE_NOMEM;\n }\n entry_settime(entry2, now);\n set_entry(bkt, entry2);\n entry_free(entry, ctx);\n }\n }\n return POGOCACHE_FOUND;\n}\n\n/// Loads an entry from the cache.\n/// Use the pogocache_load_opts.entry callback to access the value of the entry.\n/// It's possible to update the value using the 'update' param in the callback.\n/// See 'pogocache_load_opts' for all options.\n/// @returns POGOCACHE_FOUND when the entry was found.\n/// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\nint pogocache_load(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_load_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen, \n loadop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int deleteop(const void *key, size_t keylen, \n struct pogocache_delete_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defdeleteopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);\n if (!entry) {\n // Entry does not exist\n return POGOCACHE_NOTFOUND;\n }\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. It was already deleted from the map but\n // we still need to notify the user.\n if (ctx->evicted) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (opts->entry) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen,\n expires, flags, cas, opts->udata))\n {\n // User canceled the delete. 
Put it back into the map.\n // This insert will not cause an allocation error because the \n // previous delete operation left us with at least one available\n // bucket.\n struct entry *old;\n bool ok = map_insert(&shard->map, entry, hash, &old, ctx);\n assert(ok); (void)ok;\n assert(!old);\n return POGOCACHE_CANCELED;\n }\n }\n // Entry was successfully deleted.\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_DELETED;\n}\n\n/// Deletes an entry from the cache.\n/// See 'pogocache_delete_opts' for all options.\n/// @returns POGOCACHE_DELETED when the entry was successfully deleted.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\n/// @returns POGOCACHE_CANCELED when opts.entry callback returned false.\nint pogocache_delete(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_delete_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n deleteop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int storeop(const void *key, size_t keylen, const void *val,\n size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,\n int shardidx, uint32_t hash, struct pgctx *ctx)\n{\n int count = shard->map.count;\n opts = opts ? opts : &defstoreopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int64_t expires = 0;\n if (opts->expires > 0) {\n expires = opts->expires;\n } else if (opts->ttl > 0) {\n expires = int64_add_clamp(now, opts->ttl);\n }\n if (opts->keepttl) {\n // User wants to keep the existing ttl. 
Get the existing entry from the\n // map first and take its expiration.\n int i;\n struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason == 0) {\n expires = entry_expires(old);\n }\n }\n }\n shard->cas++;\n struct entry *entry = entry_new(key, keylen, val, vallen, expires,\n opts->flags, shard->cas, ctx);\n if (!entry) {\n goto nomem;\n }\n entry_settime(entry, now);\n if (opts->lowmem && ctx->noevict) {\n goto nomem;\n }\n // Insert new entry into map\n struct entry *old;\n if (!map_insert(&shard->map, entry, hash, &old, ctx)) {\n goto nomem;\n }\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason) {\n // There's an old entry, but it's no longer alive.\n // Treat this like an eviction and notify the user.\n if (ctx->evicted) {\n const char *oval;\n size_t ovallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0,\n &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,\n oexpires, oflags, ocas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(old, ctx);\n old = 0;\n }\n }\n int put_back_status = 0;\n if (old) {\n if (opts->casop) {\n // User is requesting the cas operation.\n if (ctx->usecas) {\n uint64_t old_cas = entry_cas(old);\n if (opts->cas != old_cas) {\n // CAS test failed.\n // printf(\". cas failed: expected %\" PRIu64 \", \"\n // \"got %\" PRIu64 \"\\n\", cas, old_cas);\n put_back_status = POGOCACHE_FOUND;\n }\n } else {\n put_back_status = POGOCACHE_FOUND;\n }\n } else if (opts->nx) {\n put_back_status = POGOCACHE_FOUND;\n }\n if (put_back_status) {\n put_back:;\n // The entry needs be put back into the map and operation must\n // return early.\n // This insert operation must not fail since the entry 'e' and\n // 'old' both exist and will always be bucket swapped. 
There will\n // never be a new allocation.\n struct entry *e = 0;\n bool ok = map_insert(&shard->map, old, hash, &e, ctx);\n assert(ok); (void)ok;\n assert(e == entry);\n entry_free(entry, ctx);\n return put_back_status;\n }\n } else if (opts->xx || opts->casop) {\n // The new entry must not be inserted.\n // Delete it and return early.\n struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(e == entry); (void)e;\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (old && opts->entry) {\n // User is requesting to verify the old entry before allowing it to be\n // replaced by the new entry.\n const char *val;\n size_t vallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,\n oflags, ocas, opts->udata))\n {\n // User wants to keep the old entry.\n put_back_status = POGOCACHE_CANCELED;\n goto put_back;\n }\n }\n // The new entry was inserted.\n if (old) {\n entry_free(old, ctx);\n return POGOCACHE_REPLACED;\n } else {\n if (opts->lowmem && shard->map.count > count) {\n // The map grew by one bucket, yet the user indicates that there is\n // a low memory event. Evict one entry.\n auto_evict_entry(shard, shardidx, hash, now, ctx);\n }\n return POGOCACHE_INSERTED;\n }\nnomem:\n entry_free(entry, ctx);\n return POGOCACHE_NOMEM;\n}\n\n/// Insert or replace an entry in the cache.\n/// If an entry with the same key already exists then the cache then the \n/// the opts.entry callback can be used to check the existing\n/// value first, allowing the operation to be canceled.\n/// See 'pogocache_store_opts' for all options.\n/// @returns POGOCACHE_INSERTED when the entry was inserted.\n/// @returns POGOCACHE_REPLACED when the entry replaced an existing one.\n/// @returns POGOCACHE_FOUND when the entry already exists. 
(cas/nx)\n/// @returns POGOCACHE_CANCELED when the operation was canceled.\n/// @returns POGOCACHE_NOMEM when there is system memory available.\nint pogocache_store(struct pogocache *cache, const void *key, size_t keylen, \n const void *val, size_t vallen, struct pogocache_store_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)\n );\n}\n\n\nstatic struct pogocache *rootcache(struct pogocache *cache) {\n return cache->isbatch ? cache->batch.cache : cache;\n}\n\n/// Returns the number of shards in cache\nint pogocache_nshards(struct pogocache *cache) {\n cache = rootcache(cache);\n return cache->ctx.nshards;\n}\n\nstatic int iterop(struct shard *shard, int shardidx, int64_t now,\n struct pogocache_iter_opts *opts, struct pgctx *ctx)\n{\n char buf[128];\n int status = POGOCACHE_FINISHED;\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen,\n &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n#ifdef EVICTONITER\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n // Delete entry at bucket.\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n i--;\n#endif\n } else {\n // Entry is alive, check with user for next action.\n int action = POGOCACHE_ITER_CONTINUE;\n if (opts->entry) {\n action = opts->entry(shardidx, now, key, keylen, val,\n vallen, expires, flags, cas, opts->udata);\n }\n if (action != POGOCACHE_ITER_CONTINUE) {\n if (action&POGOCACHE_ITER_DELETE) {\n // Delete entry at bucket\n delbkt(&shard->map, 
i);\n entry_free(entry, ctx);\n i--;\n }\n if (action&POGOCACHE_ITER_STOP) {\n status = POGOCACHE_CANCELED;\n break;\n }\n }\n }\n }\n tryshrink(&shard->map, true, ctx);\n return status;\n}\n\n/// Iterate over entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The pogocache_iter_opts.entry callback can be used to perform actions such\n/// as: deleting entries and stopping iteration early. \n/// See 'pogocache_iter_opts' for all options.\n/// @return POGOCACHE_FINISHED if iteration completed\n/// @return POGOCACHE_CANCELED if iteration stopped early\nint pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defiteropts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return POGOCACHE_FINISHED;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)\n );\n }\n for (int i = 0; i < nshards; i++) {\n int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n iterop(shard, i, now, opts, &cache->ctx)\n );\n if (status != POGOCACHE_FINISHED) {\n return status;\n }\n }\n return POGOCACHE_FINISHED;\n}\n\nstatic size_t countop(struct shard *shard) {\n return shard->map.count - shard->clearcount;\n}\n\n/// Returns the number of entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_count(struct pogocache *cache,\n struct pogocache_count_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defcountopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n countop(shard);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n countop(shard);\n );\n }\n return count;\n}\n\nstatic uint64_t totalop(struct shard *shard) {\n return shard->map.total;\n}\n\n/// Returns the total number of entries that have ever been stored in the cache.\n/// For the current number of entries use pogocache_count().\n/// There's an option to allow for isolating the operation to a single shard.\nuint64_t pogocache_total(struct pogocache *cache,\n struct pogocache_total_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &deftotalopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,\n totalop(shard);\n );\n }\n uint64_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,\n totalop(shard);\n );\n }\n return count;\n}\n\nstatic size_t sizeop(struct shard *shard, bool entriesonly) {\n size_t size = 0;\n if (!entriesonly) {\n size += sizeof(struct shard);\n size += sizeof(struct bucket)*shard->map.nbuckets;\n }\n size += shard->map.entsize;\n return size;\n}\n\n/// Returns the total memory size of the shard.\n/// This includes the memory size of all data structures and entries.\n/// Use the entriesonly option to limit the result to only the entries.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_size(struct pogocache *cache,\n struct pogocache_size_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defsizeopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n sizeop(shard, opts->entriesonly);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n sizeop(shard, opts->entriesonly);\n );\n }\n return count;\n}\n\n\n\nstatic int sweepop(struct shard *shard, int shardidx, int64_t now,\n size_t *swept, size_t *kept, struct pgctx *ctx)\n{\n char buf[128];\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int64_t expires = entry_expires(entry);\n int64_t etime = entry_time(entry);\n int reason = entry_alive_exp(expires, etime, now, shard->cleartime);\n if (reason == 0) {\n // entry is still alive\n (*kept)++;\n continue;\n }\n // entry is no longer alive.\n if (ctx->evicted) {\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,\n &flags, &cas, ctx);\n // Report eviction to user\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n (*swept)++;\n // Entry was deleted from bucket, which may move entries to the right\n // over one bucket to the left. 
So we need to check the same bucket\n // again.\n i--;\n }\n tryshrink(&shard->map, true, ctx);\n return 0;\n}\n\n/// Remove expired entries from the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The final 'kept' or 'swept' counts are returned.\n/// @return POGOCACHE_FINISHED when iteration completed\n/// @return POGOCACHE_CANCELED when iteration stopped early\nvoid pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept, \n struct pogocache_sweep_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweepopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n size_t sweptc = 0;\n size_t keptc = 0;\n if (opts->oneshard) {\n if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,\n &cache->ctx);\n );\n }\n } else {\n for (int i = 0; i < nshards; i++) {\n size_t sweptc2 = 0;\n size_t keptc2 = 0;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);\n );\n sweptc += sweptc2;\n keptc += keptc2;\n }\n }\n if (swept) {\n *swept = sweptc;\n }\n if (kept) {\n *kept = keptc;\n }\n}\n\nstatic int clearop(struct shard *shard, int shardidx, int64_t now, \n struct pgctx *ctx)\n{\n (void)shardidx, (void)ctx;\n shard->cleartime = now;\n shard->clearcount += (shard->map.count-shard->clearcount);\n return 0;\n}\n\n/// Clear the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nvoid pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defclearopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return;\n }\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n clearop(shard, opts->oneshardidx, now, &cache->ctx);\n );\n return;\n }\n for (int i = 0; i < cache->ctx.nshards; i++) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n clearop(shard, i, now, &cache->ctx);\n );\n }\n}\n\nstatic int sweeppollop(struct shard *shard, int shardidx, int64_t now, \n int pollsize, double *percent)\n{\n // start at random bucket\n int count = 0;\n int dead = 0;\n int bidx = mix13(now+shardidx)%shard->map.nbuckets;\n for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {\n struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n count++;\n dead += (entry_alive(entry, now, shard->cleartime) != 0);\n }\n if (count == 0) {\n *percent = 0;\n return 0;\n }\n *percent = (double)dead/(double)count;\n return 0;\n}\n\ndouble pogocache_sweep_poll(struct pogocache *cache, \n struct pogocache_sweep_poll_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweeppollopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int pollsize = opts->pollsize == 0 ? 
20 : opts->pollsize;\n \n // choose a random shard\n int shardidx = mix13(now)%nshards;\n double percent;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,\n sweeppollop(shard, shardidx, now, pollsize, &percent);\n );\n return percent;\n}\n"], ["/pogocache/src/lz4.c", "/*\n LZ4 - Fast LZ compression algorithm\n Copyright (C) 2011-2023, Yann Collet.\n\n BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following disclaimer\n in the documentation and/or other materials provided with the\n distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n You can contact the author at :\n - LZ4 homepage : http://www.lz4.org\n - LZ4 source repository : https://github.com/lz4/lz4\n*/\n\n/*-************************************\n* Tuning parameters\n**************************************/\n/*\n * LZ4_HEAPMODE :\n * Select how stateless compression functions like `LZ4_compress_default()`\n * allocate memory for their hash table,\n * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).\n */\n#ifndef LZ4_HEAPMODE\n# define LZ4_HEAPMODE 0\n#endif\n\n/*\n * LZ4_ACCELERATION_DEFAULT :\n * Select \"acceleration\" for LZ4_compress_fast() when parameter value <= 0\n */\n#define LZ4_ACCELERATION_DEFAULT 1\n/*\n * LZ4_ACCELERATION_MAX :\n * Any \"acceleration\" value higher than this threshold\n * get treated as LZ4_ACCELERATION_MAX instead (fix #876)\n */\n#define LZ4_ACCELERATION_MAX 65537\n\n\n/*-************************************\n* CPU Feature Detection\n**************************************/\n/* LZ4_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. 
It depends on compiler extension (ie, not portable).\n * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n * It can generate buggy code on targets which assembly generation depends on alignment.\n * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */\n# if defined(__GNUC__) && \\\n ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \\\n || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n# define LZ4_FORCE_MEMORY_ACCESS 2\n# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)\n# define LZ4_FORCE_MEMORY_ACCESS 1\n# endif\n#endif\n\n/*\n * LZ4_FORCE_SW_BITCOUNT\n * Define this parameter if your target system or compiler does not support hardware bit count\n */\n#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */\n# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */\n# define LZ4_FORCE_SW_BITCOUNT\n#endif\n\n\n\n/*-************************************\n* Dependency\n**************************************/\n/*\n * LZ4_SRC_INCLUDED:\n * Amalgamation flag, whether lz4.c is included\n */\n#ifndef LZ4_SRC_INCLUDED\n# define LZ4_SRC_INCLUDED 1\n#endif\n\n#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS\n# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */\n#endif\n\n#ifndef LZ4_STATIC_LINKING_ONLY\n# define LZ4_STATIC_LINKING_ONLY\n#endif\n#include \"lz4.h\"\n/* see also \"memory routines\" below */\n\n\n/*-************************************\n* Compiler 
Options\n**************************************/\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */\n# include /* only present in VS2005+ */\n# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */\n# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */\n# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */\n#endif /* _MSC_VER */\n\n#ifndef LZ4_FORCE_INLINE\n# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */\n# define LZ4_FORCE_INLINE static __forceinline\n# else\n# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */\n# if defined (__GNUC__) || defined (__clang__)\n# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))\n# else\n# define LZ4_FORCE_INLINE static inline\n# endif\n# else\n# define LZ4_FORCE_INLINE static\n# endif /* __STDC_VERSION__ */\n# endif /* _MSC_VER */\n#endif /* LZ4_FORCE_INLINE */\n\n/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE\n * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,\n * together with a simple 8-byte copy loop as a fall-back path.\n * However, this optimization hurts the decompression speed by >30%,\n * because the execution does not go to the optimized loop\n * for typical compressible data, and all of the preamble checks\n * before going to the fall-back path become useless overhead.\n * This optimization happens only with the -O3 flag, and -O2 generates\n * a simple 8-byte copy loop.\n * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8\n * functions are annotated with __attribute__((optimize(\"O2\"))),\n * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute\n * of LZ4_wildCopy8 
does not affect the compression speed.\n */\n#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)\n# define LZ4_FORCE_O2 __attribute__((optimize(\"O2\")))\n# undef LZ4_FORCE_INLINE\n# define LZ4_FORCE_INLINE static __inline __attribute__((optimize(\"O2\"),always_inline))\n#else\n# define LZ4_FORCE_O2\n#endif\n\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)\n# define expect(expr,value) (__builtin_expect ((expr),(value)) )\n#else\n# define expect(expr,value) (expr)\n#endif\n\n#ifndef likely\n#define likely(expr) expect((expr) != 0, 1)\n#endif\n#ifndef unlikely\n#define unlikely(expr) expect((expr) != 0, 0)\n#endif\n\n/* Should the alignment test prove unreliable, for some reason,\n * it can be disabled by setting LZ4_ALIGN_TEST to 0 */\n#ifndef LZ4_ALIGN_TEST /* can be externally provided */\n# define LZ4_ALIGN_TEST 1\n#endif\n\n\n/*-************************************\n* Memory routines\n**************************************/\n\n/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :\n * Disable relatively high-level LZ4/HC functions that use dynamic memory\n * allocation functions (malloc(), calloc(), free()).\n *\n * Note that this is a compile-time switch. 
And since it disables\n * public/stable LZ4 v1 API functions, we don't recommend using this\n * symbol to generate a library for distribution.\n *\n * The following public functions are removed when this symbol is defined.\n * - lz4 : LZ4_createStream, LZ4_freeStream,\n * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)\n * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,\n * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)\n * - lz4frame, lz4file : All LZ4F_* functions\n */\n#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\n# define ALLOC(s) lz4_error_memory_allocation_is_disabled\n# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled\n# define FREEMEM(p) lz4_error_memory_allocation_is_disabled\n#elif defined(LZ4_USER_MEMORY_FUNCTIONS)\n/* memory management functions can be customized by user project.\n * Below functions must exist somewhere in the Project\n * and be available at link time */\nvoid* LZ4_malloc(size_t s);\nvoid* LZ4_calloc(size_t n, size_t s);\nvoid LZ4_free(void* p);\n# define ALLOC(s) LZ4_malloc(s)\n# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)\n# define FREEMEM(p) LZ4_free(p)\n#else\n# include /* malloc, calloc, free */\n# define ALLOC(s) malloc(s)\n# define ALLOC_AND_ZERO(s) calloc(1,s)\n# define FREEMEM(p) free(p)\n#endif\n\n#if ! 
LZ4_FREESTANDING\n# include /* memset, memcpy */\n#endif\n#if !defined(LZ4_memset)\n# define LZ4_memset(p,v,s) memset((p),(v),(s))\n#endif\n#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))\n\n\n/*-************************************\n* Common Constants\n**************************************/\n#define MINMATCH 4\n\n#define WILDCOPYLENGTH 8\n#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */\n#define FASTLOOP_SAFE_DISTANCE 64\nstatic const int LZ4_minLength = (MFLIMIT+1);\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define LZ4_DISTANCE_ABSOLUTE_MAX 65535\n#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */\n# error \"LZ4_DISTANCE_MAX is too big : must be <= 65535\"\n#endif\n\n#define ML_BITS 4\n#define ML_MASK ((1U<=1)\n# include \n#else\n# ifndef assert\n# define assert(condition) ((void)0)\n# endif\n#endif\n\n#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */\n\n#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)\n# include \n static int g_debuglog_enable = 1;\n# define DEBUGLOG(l, ...) { \\\n if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \\\n fprintf(stderr, __FILE__ \" %i: \", __LINE__); \\\n fprintf(stderr, __VA_ARGS__); \\\n fprintf(stderr, \" \\n\"); \\\n } }\n#else\n# define DEBUGLOG(l, ...) 
{} /* disabled */\n#endif\n\nstatic int LZ4_isAligned(const void* ptr, size_t alignment)\n{\n return ((size_t)ptr & (alignment -1)) == 0;\n}\n\n\n/*-************************************\n* Types\n**************************************/\n#include \n#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include \n typedef uint8_t BYTE;\n typedef uint16_t U16;\n typedef uint32_t U32;\n typedef int32_t S32;\n typedef uint64_t U64;\n typedef uintptr_t uptrval;\n#else\n# if UINT_MAX != 4294967295UL\n# error \"LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4\"\n# endif\n typedef unsigned char BYTE;\n typedef unsigned short U16;\n typedef unsigned int U32;\n typedef signed int S32;\n typedef unsigned long long U64;\n typedef size_t uptrval; /* generally true, except OpenVMS-64 */\n#endif\n\n#if defined(__x86_64__)\n typedef U64 reg_t; /* 64-bits in x32 mode */\n#else\n typedef size_t reg_t; /* 32-bits in x32 mode */\n#endif\n\ntypedef enum {\n notLimited = 0,\n limitedOutput = 1,\n fillOutput = 2\n} limitedOutput_directive;\n\n\n/*-************************************\n* Reading and writing into memory\n**************************************/\n\n/**\n * LZ4 relies on memcpy with a constant size being inlined. In freestanding\n * environments, the compiler can't assume the implementation of memcpy() is\n * standard compliant, so it can't apply its specialized memcpy() inlining\n * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze\n * memcpy() as if it were standard compliant, so it can inline it in freestanding\n * environments. 
This is needed when decompressing the Linux Kernel, for example.\n */\n#if !defined(LZ4_memcpy)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)\n# else\n# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)\n# endif\n#endif\n\n#if !defined(LZ4_memmove)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memmove __builtin_memmove\n# else\n# define LZ4_memmove memmove\n# endif\n#endif\n\nstatic unsigned LZ4_isLittleEndian(void)\n{\n const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */\n return one.c[0];\n}\n\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))\n#elif defined(_MSC_VER)\n#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))\n#endif\n\n#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)\n/* lie to the compiler about data alignment; use with caution */\n\nstatic U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\n\n#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\nLZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;\nLZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;\nLZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;\n\nstatic U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }\nstatic U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; 
}\nstatic reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }\n\n#else /* safe and portable access using memcpy() */\n\nstatic U16 LZ4_read16(const void* memPtr)\n{\n U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 LZ4_read32(const void* memPtr)\n{\n U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic reg_t LZ4_read_ARCH(const void* memPtr)\n{\n reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic void LZ4_write16(void* memPtr, U16 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\nstatic void LZ4_write32(void* memPtr, U32 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* LZ4_FORCE_MEMORY_ACCESS */\n\n\nstatic U16 LZ4_readLE16(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read16(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U16)((U16)p[0] | (p[1]<<8));\n }\n}\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\nstatic U32 LZ4_readLE32(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read32(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);\n }\n}\n#endif\n\nstatic void LZ4_writeLE16(void* memPtr, U16 value)\n{\n if (LZ4_isLittleEndian()) {\n LZ4_write16(memPtr, value);\n } else {\n BYTE* p = (BYTE*)memPtr;\n p[0] = (BYTE) value;\n p[1] = (BYTE)(value>>8);\n }\n}\n\n/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */\nLZ4_FORCE_INLINE\nvoid LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/\nLZ4_FORCE_INLINE void\nLZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH\n * - there is at least 12 bytes available to write after dstEnd */\nLZ4_FORCE_INLINE void\nLZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)\n{\n BYTE v[8];\n\n assert(dstEnd >= dstPtr + MINMATCH);\n\n switch(offset) {\n case 1:\n MEM_INIT(v, *srcPtr, 8);\n break;\n case 2:\n LZ4_memcpy(v, srcPtr, 2);\n LZ4_memcpy(&v[2], srcPtr, 2);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(push)\n# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */\n#endif\n LZ4_memcpy(&v[4], v, 4);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(pop)\n#endif\n break;\n case 4:\n LZ4_memcpy(v, srcPtr, 4);\n LZ4_memcpy(&v[4], srcPtr, 4);\n break;\n default:\n LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);\n return;\n }\n\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n while (dstPtr < dstEnd) {\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n }\n}\n#endif\n\n\n/*-************************************\n* Common functions\n**************************************/\nstatic unsigned LZ4_NbCommonBytes (reg_t val)\n{\n assert(val != 0);\n if (LZ4_isLittleEndian()) {\n if (sizeof(val) == 8) {\n# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)\n/*-*************************************************************************************************\n* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.\n* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics\n* including _tzcnt_u64. 
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.\n****************************************************************************************************/\n# if defined(__clang__) && (__clang_major__ < 10)\n /* Avoid undefined clang-cl intrinsics issue.\n * See https://github.com/lz4/lz4/pull/1017 for details. */\n return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;\n# else\n /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */\n return (unsigned)_tzcnt_u64(val) >> 3;\n# endif\n# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r = 0;\n _BitScanForward64(&r, (U64)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctzll((U64)val) >> 3;\n# else\n const U64 m = 0x0101010101010101ULL;\n val ^= val - 1;\n return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);\n# endif\n } else /* 32 bits */ {\n# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r;\n _BitScanForward(&r, (U32)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctz((U32)val) >> 3;\n# else\n const U32 m = 0x01010101;\n return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;\n# endif\n }\n } else /* Big Endian CPU */ {\n if (sizeof(val)==8) {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clzll((U64)val) >> 3;\n# else\n#if 1\n /* this method is probably faster,\n * but adds a 128 bytes lookup table */\n static const unsigned char ctz7_tab[128] = {\n 7, 0, 1, 0, 2, 
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n };\n U64 const mask = 0x0101010101010101ULL;\n U64 const t = (((val >> 8) - mask) | val) & mask;\n return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];\n#else\n /* this method doesn't consume memory space like the previous one,\n * but it contains several branches,\n * that may end up slowing execution */\n static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.\n Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.\n Note that this code path is never triggered in 32-bits mode. */\n unsigned r;\n if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }\n if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n r += (!val);\n return r;\n#endif\n# endif\n } else /* 32 bits */ {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clz((U32)val) >> 3;\n# else\n val >>= 8;\n val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |\n (val + 0x00FF0000)) >> 24;\n return (unsigned)val ^ 3;\n# endif\n }\n }\n}\n\n\n#define STEPSIZE sizeof(reg_t)\nLZ4_FORCE_INLINE\nunsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)\n{\n const BYTE* const pStart = pIn;\n\n if (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) {\n pIn+=STEPSIZE; pMatch+=STEPSIZE;\n } else {\n return LZ4_NbCommonBytes(diff);\n } }\n\n while (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) { 
pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }\n pIn += LZ4_NbCommonBytes(diff);\n return (unsigned)(pIn - pStart);\n }\n\n if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }\n if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }\n if ((pIn compression run slower on incompressible data */\n\n\n/*-************************************\n* Local Structures and types\n**************************************/\ntypedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;\n\n/**\n * This enum distinguishes several different modes of accessing previous\n * content in the stream.\n *\n * - noDict : There is no preceding content.\n * - withPrefix64k : Table entries up to ctx->dictSize before the current blob\n * blob being compressed are valid and refer to the preceding\n * content (of length ctx->dictSize), which is available\n * contiguously preceding in memory the content currently\n * being compressed.\n * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere\n * else in memory, starting at ctx->dictionary with length\n * ctx->dictSize.\n * - usingDictCtx : Everything concerning the preceding content is\n * in a separate context, pointed to by ctx->dictCtx.\n * ctx->dictionary, ctx->dictSize, and table entries\n * in the current context that refer to positions\n * preceding the beginning of the current compression are\n * ignored. 
Instead, ctx->dictCtx->dictionary and ctx->dictCtx\n * ->dictSize describe the location and size of the preceding\n * content, and matches are found by looking in the ctx\n * ->dictCtx->hashTable.\n */\ntypedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;\ntypedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;\n\n\n/*-************************************\n* Local Utils\n**************************************/\nint LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }\nconst char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }\nint LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }\nint LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }\n\n\n/*-****************************************\n* Internal Definitions, used only in Tests\n*******************************************/\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);\n\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize);\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize);\n#if defined (__cplusplus)\n}\n#endif\n\n/*-******************************\n* Compression functions\n********************************/\nLZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)\n{\n if (tableType == byU16)\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));\n else\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)\n{\n const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG+1 : LZ4_HASHLOG;\n if (LZ4_isLittleEndian()) {\n const U64 prime5bytes = 889523592379ULL;\n return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));\n } else {\n const U64 prime8bytes = 11400714785074694791ULL;\n return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));\n }\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)\n{\n if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\n return LZ4_hash4(LZ4_readLE32(p), tableType);\n#else\n return LZ4_hash4(LZ4_read32(p), tableType);\n#endif\n}\n\nLZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: { /* illegal! */ assert(0); return; }\n case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }\n }\n}\n\nLZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: /* fallthrough */\n case byPtr: { /* illegal! 
*/ assert(0); return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }\n }\n}\n\n/* LZ4_putPosition*() : only used in byPtr mode */\nLZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,\n void* tableBase, tableType_t const tableType)\n{\n const BYTE** const hashTable = (const BYTE**)tableBase;\n assert(tableType == byPtr); (void)tableType;\n hashTable[h] = p;\n}\n\nLZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n LZ4_putPositionOnHash(p, h, tableBase, tableType);\n}\n\n/* LZ4_getIndexOnHash() :\n * Index of match position registered in hash table.\n * hash position must be calculated by using base+index, or dictBase+index.\n * Assumption 1 : only valid if tableType == byU32 or byU16.\n * Assumption 2 : h is presumed valid (within limits of hash table)\n */\nLZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);\n if (tableType == byU32) {\n const U32* const hashTable = (const U32*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-2)));\n return hashTable[h];\n }\n if (tableType == byU16) {\n const U16* const hashTable = (const U16*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-1)));\n return hashTable[h];\n }\n assert(0); return 0; /* forbidden case */\n}\n\nstatic const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n assert(tableType == byPtr); (void)tableType;\n { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }\n}\n\nLZ4_FORCE_INLINE const BYTE*\nLZ4_getPosition(const BYTE* p,\n const void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n return LZ4_getPositionOnHash(h, tableBase, tableType);\n}\n\nLZ4_FORCE_INLINE 
void\nLZ4_prepareTable(LZ4_stream_t_internal* const cctx,\n const int inputSize,\n const tableType_t tableType) {\n /* If the table hasn't been used, it's guaranteed to be zeroed out, and is\n * therefore safe to use no matter what mode we're in. Otherwise, we figure\n * out if it's safe to leave as is or whether it needs to be reset.\n */\n if ((tableType_t)cctx->tableType != clearedTable) {\n assert(inputSize >= 0);\n if ((tableType_t)cctx->tableType != tableType\n || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)\n || ((tableType == byU32) && cctx->currentOffset > 1 GB)\n || tableType == byPtr\n || inputSize >= 4 KB)\n {\n DEBUGLOG(4, \"LZ4_prepareTable: Resetting table in %p\", cctx);\n MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);\n cctx->currentOffset = 0;\n cctx->tableType = (U32)clearedTable;\n } else {\n DEBUGLOG(4, \"LZ4_prepareTable: Re-use hash table (no reset)\");\n }\n }\n\n /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,\n * is faster than compressing without a gap.\n * However, compressing with currentOffset == 0 is faster still,\n * so we preserve that case.\n */\n if (cctx->currentOffset != 0 && tableType == byU32) {\n DEBUGLOG(5, \"LZ4_prepareTable: adding 64KB to currentOffset\");\n cctx->currentOffset += 64 KB;\n }\n\n /* Finally, clear history */\n cctx->dictCtx = NULL;\n cctx->dictionary = NULL;\n cctx->dictSize = 0;\n}\n\n/** LZ4_compress_generic_validated() :\n * inlined, to ensure branches are decided at compilation time.\n * The following conditions are presumed already validated:\n * - source != NULL\n * - inputSize > 0\n */\nLZ4_FORCE_INLINE int LZ4_compress_generic_validated(\n LZ4_stream_t_internal* const cctx,\n const char* const source,\n char* const dest,\n const int inputSize,\n int* inputConsumed, /* only written when outputDirective == fillOutput */\n const int maxOutputSize,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const 
dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n int result;\n const BYTE* ip = (const BYTE*)source;\n\n U32 const startIndex = cctx->currentOffset;\n const BYTE* base = (const BYTE*)source - startIndex;\n const BYTE* lowLimit;\n\n const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;\n const BYTE* const dictionary =\n dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;\n const U32 dictSize =\n dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;\n const U32 dictDelta =\n (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */\n\n int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);\n U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */\n const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;\n const BYTE* anchor = (const BYTE*) source;\n const BYTE* const iend = ip + inputSize;\n const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;\n const BYTE* const matchlimit = iend - LASTLITERALS;\n\n /* the dictCtx currentOffset is indexed on the start of the dictionary,\n * while a dictionary in the current context precedes the currentOffset */\n const BYTE* dictBase = (dictionary == NULL) ? NULL :\n (dictDirective == usingDictCtx) ?\n dictionary + dictSize - dictCtx->currentOffset :\n dictionary + dictSize - startIndex;\n\n BYTE* op = (BYTE*) dest;\n BYTE* const olimit = op + maxOutputSize;\n\n U32 offset = 0;\n U32 forwardH;\n\n DEBUGLOG(5, \"LZ4_compress_generic_validated: srcSize=%i, tableType=%u\", inputSize, tableType);\n assert(ip != NULL);\n if (tableType == byU16) assert(inputSize= 1);\n\n lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);\n\n /* Update context state */\n if (dictDirective == usingDictCtx) {\n /* Subsequent linked blocks can't use the dictionary. */\n /* Instead, they use the block we just compressed. */\n cctx->dictCtx = NULL;\n cctx->dictSize = (U32)inputSize;\n } else {\n cctx->dictSize += (U32)inputSize;\n }\n cctx->currentOffset += (U32)inputSize;\n cctx->tableType = (U32)tableType;\n\n if (inputSizehashTable, byPtr);\n } else {\n LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);\n } }\n ip++; forwardH = LZ4_hashPosition(ip, tableType);\n\n /* Main Loop */\n for ( ; ; ) {\n const BYTE* match;\n BYTE* token;\n const BYTE* filledIp;\n\n /* Find a match */\n if (tableType == byPtr) {\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);\n\n } while ( (match+LZ4_DISTANCE_MAX < ip)\n || (LZ4_read32(match) != LZ4_read32(ip)) );\n\n } else { /* byU32, byU16 */\n\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n U32 const current = (U32)(forwardIp - base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex <= current);\n assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n 
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n matchIndex += dictDelta; /* make dictCtx index comparable with current context */\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else if (dictDirective == usingExtDict) {\n if (matchIndex < startIndex) {\n DEBUGLOG(7, \"extDict candidate: matchIndex=%5u < startIndex=%5u\", matchIndex, startIndex);\n assert(startIndex - matchIndex >= MINMATCH);\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else { /* single continuous memory segment */\n match = base + matchIndex;\n }\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n\n DEBUGLOG(7, \"candidate at pos=%u (offset=%u \\n\", matchIndex, current - matchIndex);\n if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */\n assert(matchIndex < current);\n if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))\n && (matchIndex+LZ4_DISTANCE_MAX < current)) {\n continue;\n } /* too far */\n assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */\n\n if (LZ4_read32(match) == LZ4_read32(ip)) {\n if (maybe_extMem) offset = current - matchIndex;\n break; /* match found */\n }\n\n } while(1);\n }\n\n /* Catch up */\n filledIp = ip;\n assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */\n if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {\n do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));\n }\n\n /* Encode Literals */\n { unsigned const litLength = (unsigned)(ip - anchor);\n token = op++;\n if ((outputDirective == limitedOutput) && /* Check output buffer overflow */\n (unlikely(op + 
litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n if ((outputDirective == fillOutput) &&\n (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {\n op--;\n goto _last_literals;\n }\n if (litLength >= RUN_MASK) {\n unsigned len = litLength - RUN_MASK;\n *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255;\n *op++ = (BYTE)len;\n }\n else *token = (BYTE)(litLength< olimit)) {\n /* the match was too close to the end, rewind and go to last literals */\n op = token;\n goto _last_literals;\n }\n\n /* Encode Offset */\n if (maybe_extMem) { /* static test */\n DEBUGLOG(6, \" with offset=%u (ext if > %i)\", offset, (int)(ip - (const BYTE*)source));\n assert(offset <= LZ4_DISTANCE_MAX && offset > 0);\n LZ4_writeLE16(op, (U16)offset); op+=2;\n } else {\n DEBUGLOG(6, \" with offset=%u (same segment)\", (U32)(ip - match));\n assert(ip-match <= LZ4_DISTANCE_MAX);\n LZ4_writeLE16(op, (U16)(ip - match)); op+=2;\n }\n\n /* Encode MatchLength */\n { unsigned matchCode;\n\n if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)\n && (lowLimit==dictionary) /* match within extDict */ ) {\n const BYTE* limit = ip + (dictEnd-match);\n assert(dictEnd > match);\n if (limit > matchlimit) limit = matchlimit;\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);\n ip += (size_t)matchCode + MINMATCH;\n if (ip==limit) {\n unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);\n matchCode += more;\n ip += more;\n }\n DEBUGLOG(6, \" with matchLength=%u starting in extDict\", matchCode+MINMATCH);\n } else {\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);\n ip += (size_t)matchCode + MINMATCH;\n DEBUGLOG(6, \" with matchLength=%u\", matchCode+MINMATCH);\n }\n\n if 
((outputDirective) && /* Check output buffer overflow */\n (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {\n if (outputDirective == fillOutput) {\n /* Match description too long : reduce it */\n U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;\n ip -= matchCode - newMatchCode;\n assert(newMatchCode < matchCode);\n matchCode = newMatchCode;\n if (unlikely(ip <= filledIp)) {\n /* We have already filled up to filledIp so if ip ends up less than filledIp\n * we have positions in the hash table beyond the current position. This is\n * a problem if we reuse the hash table. So we have to remove these positions\n * from the hash table.\n */\n const BYTE* ptr;\n DEBUGLOG(5, \"Clearing %u positions\", (U32)(filledIp - ip));\n for (ptr = ip; ptr <= filledIp; ++ptr) {\n U32 const h = LZ4_hashPosition(ptr, tableType);\n LZ4_clearHash(h, cctx->hashTable, tableType);\n }\n }\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n }\n if (matchCode >= ML_MASK) {\n *token += ML_MASK;\n matchCode -= ML_MASK;\n LZ4_write32(op, 0xFFFFFFFF);\n while (matchCode >= 4*255) {\n op+=4;\n LZ4_write32(op, 0xFFFFFFFF);\n matchCode -= 4*255;\n }\n op += matchCode / 255;\n *op++ = (BYTE)(matchCode % 255);\n } else\n *token += (BYTE)(matchCode);\n }\n /* Ensure we have enough space for the last literals. 
*/\n assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));\n\n anchor = ip;\n\n /* Test end of chunk */\n if (ip >= mflimitPlusOne) break;\n\n /* Fill table */\n { U32 const h = LZ4_hashPosition(ip-2, tableType);\n if (tableType == byPtr) {\n LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);\n } else {\n U32 const idx = (U32)((ip-2) - base);\n LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);\n } }\n\n /* Test next position */\n if (tableType == byPtr) {\n\n match = LZ4_getPosition(ip, cctx->hashTable, tableType);\n LZ4_putPosition(ip, cctx->hashTable, tableType);\n if ( (match+LZ4_DISTANCE_MAX >= ip)\n && (LZ4_read32(match) == LZ4_read32(ip)) )\n { token=op++; *token=0; goto _next_match; }\n\n } else { /* byU32, byU16 */\n\n U32 const h = LZ4_hashPosition(ip, tableType);\n U32 const current = (U32)(ip-base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n matchIndex += dictDelta;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else if (dictDirective==usingExtDict) {\n if (matchIndex < startIndex) {\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else { /* single memory segment */\n match = base + matchIndex;\n }\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if ( ((dictIssue==dictSmall) ? 
(matchIndex >= prefixIdxLimit) : 1)\n && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))\n && (LZ4_read32(match) == LZ4_read32(ip)) ) {\n token=op++;\n *token=0;\n if (maybe_extMem) offset = current - matchIndex;\n DEBUGLOG(6, \"seq.start:%i, literals=%u, match.start:%i\",\n (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));\n goto _next_match;\n }\n }\n\n /* Prepare next loop */\n forwardH = LZ4_hashPosition(++ip, tableType);\n\n }\n\n_last_literals:\n /* Encode Last Literals */\n { size_t lastRun = (size_t)(iend - anchor);\n if ( (outputDirective) && /* Check output buffer overflow */\n (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {\n if (outputDirective == fillOutput) {\n /* adapt lastRun to fill 'dst' */\n assert(olimit >= op);\n lastRun = (size_t)(olimit-op) - 1/*token*/;\n lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */\n }\n }\n DEBUGLOG(6, \"Final literal run : %i literals\", (int)lastRun);\n if (lastRun >= RUN_MASK) {\n size_t accumulator = lastRun - RUN_MASK;\n *op++ = RUN_MASK << ML_BITS;\n for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;\n *op++ = (BYTE) accumulator;\n } else {\n *op++ = (BYTE)(lastRun< 0);\n DEBUGLOG(5, \"LZ4_compress_generic: compressed %i bytes into %i bytes\", inputSize, result);\n return result;\n}\n\n/** LZ4_compress_generic() :\n * inlined, to ensure branches are decided at compilation time;\n * takes care of src == (NULL, 0)\n * and forward the rest to LZ4_compress_generic_validated */\nLZ4_FORCE_INLINE int LZ4_compress_generic(\n LZ4_stream_t_internal* const cctx,\n const char* const src,\n char* const dst,\n const int srcSize,\n int *inputConsumed, /* only written when outputDirective == fillOutput */\n const int dstCapacity,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n DEBUGLOG(5, \"LZ4_compress_generic: srcSize=%i, dstCapacity=%i\",\n srcSize, dstCapacity);\n\n if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */\n if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */\n if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */\n DEBUGLOG(5, \"Generating an empty block\");\n assert(outputDirective == notLimited || dstCapacity >= 1);\n assert(dst != NULL);\n dst[0] = 0;\n if (outputDirective == fillOutput) {\n assert (inputConsumed != NULL);\n *inputConsumed = 0;\n }\n return 1;\n }\n assert(src != NULL);\n\n return LZ4_compress_generic_validated(cctx, src, dst, srcSize,\n inputConsumed, /* only written into if outputDirective == fillOutput */\n dstCapacity, outputDirective,\n tableType, dictDirective, dictIssue, 
acceleration);\n}\n\n\nint LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;\n assert(ctx != NULL);\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n if (maxOutputSize >= LZ4_compressBound(inputSize)) {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n/**\n * LZ4_compress_fast_extState_fastReset() :\n * A variant of LZ4_compress_fast_extState().\n *\n * Using this variant avoids an expensive initialization step. 
It is only safe\n * to call if the state buffer is known to be correctly initialized already\n * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of\n * \"correctly initialized\").\n */\nint LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n assert(ctx != NULL);\n\n if (dstCapacity >= LZ4_compressBound(srcSize)) {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n\nint LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)\n{\n int result;\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctxPtr == NULL) return 0;\n#else\n LZ4_stream_t ctx;\n LZ4_stream_t* const ctxPtr = &ctx;\n#endif\n result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctxPtr);\n#endif\n return result;\n}\n\n\nint LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);\n}\n\n\n/* Note!: This function leaves the stream in an unclean/broken state!\n * It is not safe to subsequently use the same state with a _fastReset() or\n * _continue() call without resetting it. */\nstatic int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n void* const s = LZ4_initStream(state, sizeof (*state));\n assert(s != NULL); (void)s;\n\n if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */\n return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);\n } else {\n if (*srcSizePtr < LZ4_64Klimit) {\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);\n } }\n}\n\nint LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);\n /* clean the state on exit */\n LZ4_initStream(state, sizeof (LZ4_stream_t));\n return r;\n}\n\n\nint LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)\n{\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctx == NULL) return 0;\n#else\n LZ4_stream_t ctxBody;\n LZ4_stream_t* const ctx = &ctxBody;\n#endif\n\n int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctx);\n#endif\n return result;\n}\n\n\n\n/*-******************************\n* Streaming functions\n********************************/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_stream_t* LZ4_createStream(void)\n{\n LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));\n LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));\n DEBUGLOG(4, \"LZ4_createStream %p\", lz4s);\n if (lz4s == NULL) return NULL;\n LZ4_initStream(lz4s, sizeof(*lz4s));\n return lz4s;\n}\n#endif\n\nstatic size_t LZ4_stream_t_alignment(void)\n{\n#if LZ4_ALIGN_TEST\n typedef struct { char c; LZ4_stream_t t; } t_a;\n return sizeof(t_a) - sizeof(LZ4_stream_t);\n#else\n return 1; /* effectively disabled */\n#endif\n}\n\nLZ4_stream_t* LZ4_initStream (void* buffer, size_t size)\n{\n DEBUGLOG(5, \"LZ4_initStream\");\n if (buffer == NULL) { return NULL; }\n if (size < sizeof(LZ4_stream_t)) { return NULL; }\n if (!LZ4_isAligned(buffer, 
LZ4_stream_t_alignment())) return NULL;\n MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));\n return (LZ4_stream_t*)buffer;\n}\n\n/* resetStream is now deprecated,\n * prefer initStream() which is more general */\nvoid LZ4_resetStream (LZ4_stream_t* LZ4_stream)\n{\n DEBUGLOG(5, \"LZ4_resetStream (ctx:%p)\", LZ4_stream);\n MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));\n}\n\nvoid LZ4_resetStream_fast(LZ4_stream_t* ctx) {\n LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nint LZ4_freeStream (LZ4_stream_t* LZ4_stream)\n{\n if (!LZ4_stream) return 0; /* support free on NULL */\n DEBUGLOG(5, \"LZ4_freeStream %p\", LZ4_stream);\n FREEMEM(LZ4_stream);\n return (0);\n}\n#endif\n\n\ntypedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;\n#define HASH_UNIT sizeof(reg_t)\nint LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,\n const char* dictionary, int dictSize,\n LoadDict_mode_e _ld)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n const tableType_t tableType = byU32;\n const BYTE* p = (const BYTE*)dictionary;\n const BYTE* const dictEnd = p + dictSize;\n U32 idx32;\n\n DEBUGLOG(4, \"LZ4_loadDict (%i bytes from %p into %p)\", dictSize, dictionary, LZ4_dict);\n\n /* It's necessary to reset the context,\n * and not just continue it with prepareTable()\n * to avoid any risk of generating overflowing matchIndex\n * when compressing using this dictionary */\n LZ4_resetStream(LZ4_dict);\n\n /* We always increment the offset by 64 KB, since, if the dict is longer,\n * we truncate it to the last 64k, and if it's shorter, we still want to\n * advance by a whole window length so we can provide the guarantee that\n * there are only valid offsets in the window, which allows an optimization\n * in LZ4_compress_fast_continue() where it uses noDictIssue even when the\n * dictionary isn't a full 64k. 
*/\n dict->currentOffset += 64 KB;\n\n if (dictSize < (int)HASH_UNIT) {\n return 0;\n }\n\n if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;\n dict->dictionary = p;\n dict->dictSize = (U32)(dictEnd - p);\n dict->tableType = (U32)tableType;\n idx32 = dict->currentOffset - dict->dictSize;\n\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n /* Note: overwriting => favors positions end of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n p+=3; idx32+=3;\n }\n\n if (_ld == _ld_slow) {\n /* Fill hash table with additional references, to improve compression capability */\n p = dict->dictionary;\n idx32 = dict->currentOffset - dict->dictSize;\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n U32 const limit = dict->currentOffset - 64 KB;\n if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {\n /* Note: not overwriting => favors positions beginning of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n }\n p++; idx32++;\n }\n }\n\n return (int)dict->dictSize;\n}\n\nint LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);\n}\n\nint LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);\n}\n\nvoid LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)\n{\n const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :\n &(dictionaryStream->internal_donotuse);\n\n DEBUGLOG(4, \"LZ4_attach_dictionary (%p, %p, size %u)\",\n workingStream, dictionaryStream,\n dictCtx != NULL ? dictCtx->dictSize : 0);\n\n if (dictCtx != NULL) {\n /* If the current offset is zero, we will never look in the\n * external dictionary context, since there is no value a table\n * entry can take that indicate a miss. 
In that case, we need\n * to bump the offset to something non-zero.\n */\n if (workingStream->internal_donotuse.currentOffset == 0) {\n workingStream->internal_donotuse.currentOffset = 64 KB;\n }\n\n /* Don't actually attach an empty dictionary.\n */\n if (dictCtx->dictSize == 0) {\n dictCtx = NULL;\n }\n }\n workingStream->internal_donotuse.dictCtx = dictCtx;\n}\n\n\nstatic void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)\n{\n assert(nextSize >= 0);\n if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */\n /* rescale hash table */\n U32 const delta = LZ4_dict->currentOffset - 64 KB;\n const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;\n int i;\n DEBUGLOG(4, \"LZ4_renormDictT\");\n for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0;\n else LZ4_dict->hashTable[i] -= delta;\n }\n LZ4_dict->currentOffset = 64 KB;\n if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;\n LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;\n }\n}\n\n\nint LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,\n const char* source, char* dest,\n int inputSize, int maxOutputSize,\n int acceleration)\n{\n const tableType_t tableType = byU32;\n LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;\n const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;\n\n DEBUGLOG(5, \"LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)\", inputSize, streamPtr->dictSize);\n\n LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n\n /* invalidate tiny dictionaries */\n if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */\n && (dictEnd != source) /* prefix mode */\n && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */\n && (streamPtr->dictCtx == NULL) /* usingDictCtx */\n ) {\n DEBUGLOG(5, \"LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small\", streamPtr->dictSize, streamPtr->dictionary);\n /* remove dictionary existence from history, to employ faster prefix mode */\n streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)source;\n dictEnd = source;\n }\n\n /* Check overlapping input/dictionary space */\n { const char* const sourceEnd = source + inputSize;\n if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {\n streamPtr->dictSize = (U32)(dictEnd - sourceEnd);\n if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;\n if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;\n }\n }\n\n /* prefix mode : source data follows dictionary */\n if (dictEnd == source) {\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);\n else\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);\n }\n\n /* external dictionary mode */\n { int 
result;\n if (streamPtr->dictCtx) {\n /* We depend here on the fact that dictCtx'es (produced by\n * LZ4_loadDict) guarantee that their tables contain no references\n * to offsets between dictCtx->currentOffset - 64 KB and\n * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe\n * to use noDictIssue even when the dict isn't a full 64 KB.\n */\n if (inputSize > 4 KB) {\n /* For compressing large blobs, it is faster to pay the setup\n * cost to copy the dictionary's tables into the active context,\n * so that the compression loop is only looking into one table.\n */\n LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);\n }\n } else { /* small data <= 4 KB */\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n }\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)inputSize;\n return result;\n }\n}\n\n\n/* Hidden debug function, to force-test external dictionary mode */\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n\n LZ4_renormDictT(streamPtr, srcSize);\n\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, 
dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n\n return result;\n}\n\n\n/*! LZ4_saveDict() :\n * If previously compressed data block is not guaranteed to remain available at its memory location,\n * save it into a safer place (char* safeBuffer).\n * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,\n * one can therefore call LZ4_compress_fast_continue() right after.\n * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.\n */\nint LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n\n DEBUGLOG(5, \"LZ4_saveDict : dictSize=%i, safeBuffer=%p\", dictSize, safeBuffer);\n\n if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */\n if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }\n\n if (safeBuffer == NULL) assert(dictSize == 0);\n if (dictSize > 0) {\n const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;\n assert(dict->dictionary);\n LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);\n }\n\n dict->dictionary = (const BYTE*)safeBuffer;\n dict->dictSize = (U32)dictSize;\n\n return dictSize;\n}\n\n\n\n/*-*******************************\n * Decompression functions\n ********************************/\n\ntypedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;\n\n#undef MIN\n#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )\n\n\n/* variant for decompress_unsafe()\n * does not know end of input\n * presumes input is well formed\n * note : will consume at least one byte */\nstatic size_t read_long_length_no_check(const BYTE** pp)\n{\n size_t b, l = 0;\n do { b = **pp; (*pp)++; l += b; } while (b==255);\n DEBUGLOG(6, \"read_long_length_no_check: +length=%zu using %zu input bytes\", l, l/255 + 1)\n return l;\n}\n\n/* core decoder variant for LZ4_decompress_fast*()\n * for legacy support only : these entry points are deprecated.\n * - Presumes input is correctly formed (no defense vs malformed inputs)\n * - Does not know input size (presume input buffer is \"large enough\")\n * - Decompress a full block (only)\n * @return : nb of bytes read from input.\n * Note : this variant is not optimized for speed, just for maintenance.\n * the goal is to remove support of decompress_fast*() variants by v2.0\n**/\nLZ4_FORCE_INLINE int\nLZ4_decompress_unsafe_generic(\n const BYTE* const istart,\n BYTE* const ostart,\n int decompressedSize,\n\n size_t prefixSize,\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note: =0 if dictStart==NULL */\n )\n{\n const BYTE* ip = istart;\n BYTE* op = (BYTE*)ostart;\n BYTE* const oend = ostart + decompressedSize;\n const BYTE* const prefixStart = ostart - prefixSize;\n\n DEBUGLOG(5, \"LZ4_decompress_unsafe_generic\");\n if (dictStart == NULL) assert(dictSize == 0);\n\n while (1) {\n /* start new sequence */\n unsigned token = *ip++;\n\n /* literals */\n { size_t ll = token >> ML_BITS;\n if (ll==15) {\n /* long literal length */\n ll += read_long_length_no_check(&ip);\n }\n if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */\n LZ4_memmove(op, ip, ll); /* support in-place decompression */\n op += ll;\n ip += ll;\n if ((size_t)(oend-op) < MFLIMIT) {\n if (op==oend) break; /* end of block */\n DEBUGLOG(5, \"invalid: literals end at distance %zi from end of block\", oend-op);\n /* incorrect end of block 
:\n * last match must start at least MFLIMIT==12 bytes before end of output block */\n return -1;\n } }\n\n /* match */\n { size_t ml = token & 15;\n size_t const offset = LZ4_readLE16(ip);\n ip+=2;\n\n if (ml==15) {\n /* long literal length */\n ml += read_long_length_no_check(&ip);\n }\n ml += MINMATCH;\n\n if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */\n\n { const BYTE* match = op - offset;\n\n /* out of range */\n if (offset > (size_t)(op - prefixStart) + dictSize) {\n DEBUGLOG(6, \"offset out of range\");\n return -1;\n }\n\n /* check special case : extDict */\n if (offset > (size_t)(op - prefixStart)) {\n /* extDict scenario */\n const BYTE* const dictEnd = dictStart + dictSize;\n const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));\n size_t const extml = (size_t)(dictEnd - extMatch);\n if (extml > ml) {\n /* match entirely within extDict */\n LZ4_memmove(op, extMatch, ml);\n op += ml;\n ml = 0;\n } else {\n /* match split between extDict & prefix */\n LZ4_memmove(op, extMatch, extml);\n op += extml;\n ml -= extml;\n }\n match = prefixStart;\n }\n\n /* match copy - slow variant, supporting overlap copy */\n { size_t u;\n for (u=0; u= ipmax before start of loop. Returns initial_error if so.\n * @error (output) - error code. 
Must be set to 0 before call.\n**/\ntypedef size_t Rvl_t;\nstatic const Rvl_t rvl_error = (Rvl_t)(-1);\nLZ4_FORCE_INLINE Rvl_t\nread_variable_length(const BYTE** ip, const BYTE* ilimit,\n int initial_check)\n{\n Rvl_t s, length = 0;\n assert(ip != NULL);\n assert(*ip != NULL);\n assert(ilimit != NULL);\n if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */\n return rvl_error;\n }\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n if (likely(s != 255)) return length;\n do {\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n } while (s == 255);\n\n return length;\n}\n\n/*! 
LZ4_decompress_generic() :\n * This generic decompression function covers all use cases.\n * It shall be instantiated several times, using different sets of directives.\n * Note that it is important for performance that this function really get inlined,\n * in order to remove useless branches during compilation optimization.\n */\nLZ4_FORCE_INLINE int\nLZ4_decompress_generic(\n const char* const src,\n char* const dst,\n int srcSize,\n int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */\n\n earlyEnd_directive partialDecoding, /* full, partial */\n dict_directive dict, /* noDict, withPrefix64k, usingExtDict */\n const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note : = 0 if noDict */\n )\n{\n if ((src == NULL) || (outputSize < 0)) { return -1; }\n\n { const BYTE* ip = (const BYTE*) src;\n const BYTE* const iend = ip + srcSize;\n\n BYTE* op = (BYTE*) dst;\n BYTE* const oend = op + outputSize;\n BYTE* cpy;\n\n const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;\n\n const int checkOffset = (dictSize < (int)(64 KB));\n\n\n /* Set up the \"end\" pointers for the shortcut. */\n const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;\n const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;\n\n const BYTE* match;\n size_t offset;\n unsigned token;\n size_t length;\n\n\n DEBUGLOG(5, \"LZ4_decompress_generic (srcSize:%i, dstSize:%i)\", srcSize, outputSize);\n\n /* Special cases */\n assert(lowPrefix <= op);\n if (unlikely(outputSize==0)) {\n /* Empty output buffer */\n if (partialDecoding) return 0;\n return ((srcSize==1) && (*ip==0)) ? 
0 : -1;\n }\n if (unlikely(srcSize==0)) { return -1; }\n\n /* LZ4_FAST_DEC_LOOP:\n * designed for modern OoO performance cpus,\n * where copying reliably 32-bytes is preferable to an unpredictable branch.\n * note : fast loop may show a regression for some client arm chips. */\n#if LZ4_FAST_DEC_LOOP\n if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(6, \"move to safe decode loop\");\n goto safe_decode;\n }\n\n /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using fast decode loop\");\n while (1) {\n /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */\n assert(oend - op >= FASTLOOP_SAFE_DISTANCE);\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) {\n DEBUGLOG(6, \"error reading long literal length\");\n goto _output_error;\n }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n\n /* copy literals */\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }\n LZ4_wildCopy32(op, ip, op+length);\n ip += length; op += length;\n } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {\n /* We don't need to check oend, since we check it once for each loop below */\n DEBUGLOG(7, \"copy %u bytes in a 16-bytes stripe\", (unsigned)length);\n /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */\n LZ4_memcpy(op, ip, 16);\n ip += length; op += length;\n } else {\n goto safe_literal_copy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n DEBUGLOG(6, \"blockPos%6u: offset = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)offset);\n match = op - offset;\n assert(match <= op); /* overflow check */\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \" match length token = %u (len==%u)\", (unsigned)length, (unsigned)length+MINMATCH);\n\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) {\n DEBUGLOG(5, \"error reading long match length\");\n goto _output_error;\n }\n length += addl;\n length += MINMATCH;\n DEBUGLOG(7, \" long match length == %u\", (unsigned)length);\n if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n goto safe_match_copy;\n }\n } else {\n length += MINMATCH;\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(7, \"moving to safe_match_copy (ml==%u)\", (unsigned)length);\n goto safe_match_copy;\n }\n\n /* Fastpath check: skip LZ4_wildCopy32 when true */\n if ((dict == withPrefix64k) || (match >= lowPrefix)) {\n if (offset >= 8) {\n assert(match >= lowPrefix);\n assert(match <= op);\n assert(op + 18 <= oend);\n\n LZ4_memcpy(op, match, 8);\n LZ4_memcpy(op+8, match+8, 8);\n LZ4_memcpy(op+16, match+16, 2);\n op += length;\n continue;\n } } }\n\n if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {\n DEBUGLOG(5, \"Error : pos=%zi, offset=%zi => outside buffers\", op-lowPrefix, op-match);\n goto _output_error;\n }\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) {\n DEBUGLOG(7, \"partialDecoding: dictionary match, close to dstEnd\");\n length = MIN(length, (size_t)(oend-op));\n } else {\n DEBUGLOG(6, \"end-of-block condition violated\")\n goto _output_error;\n } }\n\n if (length <= 
(size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) { *op++ = *copyFrom++; }\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n\n /* copy match within block */\n cpy = op + length;\n\n assert((op <= oend) && (oend-op >= 32));\n if (unlikely(offset<16)) {\n LZ4_memcpy_using_offset(op, match, cpy, offset);\n } else {\n LZ4_wildCopy32(op, match, cpy);\n }\n\n op = cpy; /* wildcopy correction */\n }\n safe_decode:\n#endif\n\n /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using safe decode loop\");\n while (1) {\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* A two-stage shortcut for the most common case:\n * 1) If the literal length is 0..14, and there is enough space,\n * enter the shortcut and copy 16 bytes on behalf of the literals\n * (in the fast mode, only 8 bytes can be safely copied this way).\n * 2) Further if the match length is 4..18, copy 18 bytes in a similar\n * manner; but we ensure that there's enough space in the output for\n * those 18 bytes earlier, upon entering the shortcut (in other words,\n * there is a combined check for both stages).\n */\n if ( (length != RUN_MASK)\n /* strictly \"less than\" on input, to re-enter the loop with at least one byte */\n && likely((ip < shortiend) & (op <= 
shortoend)) ) {\n /* Copy the literals */\n LZ4_memcpy(op, ip, 16);\n op += length; ip += length;\n\n /* The second stage: prepare for match copying, decode full info.\n * If it doesn't work out, the info won't be wasted. */\n length = token & ML_MASK; /* match length */\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u (len=%u)\", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);\n offset = LZ4_readLE16(ip); ip += 2;\n match = op - offset;\n assert(match <= op); /* check overflow */\n\n /* Do not deal with overlapping matches. */\n if ( (length != ML_MASK)\n && (offset >= 8)\n && (dict==withPrefix64k || match >= lowPrefix) ) {\n /* Copy the match. */\n LZ4_memcpy(op + 0, match + 0, 8);\n LZ4_memcpy(op + 8, match + 8, 8);\n LZ4_memcpy(op +16, match +16, 2);\n op += length + MINMATCH;\n /* Both stages worked, load the next token. */\n continue;\n }\n\n /* The second stage didn't work out, but the info is ready.\n * Propel it right to the point of match copying. */\n goto _copy_match;\n }\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n }\n\n#if LZ4_FAST_DEC_LOOP\n safe_literal_copy:\n#endif\n /* copy literals */\n cpy = op+length;\n\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {\n /* We've either hit the input parsing restriction or the output parsing restriction.\n * In the normal scenario, decoding a full block, it must be the last sequence,\n * otherwise it's an error (invalid input or dimensions).\n * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.\n */\n if (partialDecoding) {\n /* Since we are 
partial decoding we may be in this block because of the output parsing\n * restriction, which is not valid since the output buffer is allowed to be undersized.\n */\n DEBUGLOG(7, \"partialDecoding: copying literals, close to input or output end\")\n DEBUGLOG(7, \"partialDecoding: literal length = %u\", (unsigned)length);\n DEBUGLOG(7, \"partialDecoding: remaining space in dstBuffer : %i\", (int)(oend - op));\n DEBUGLOG(7, \"partialDecoding: remaining space in srcBuffer : %i\", (int)(iend - ip));\n /* Finishing in the middle of a literals segment,\n * due to lack of input.\n */\n if (ip+length > iend) {\n length = (size_t)(iend-ip);\n cpy = op + length;\n }\n /* Finishing in the middle of a literals segment,\n * due to lack of output space.\n */\n if (cpy > oend) {\n cpy = oend;\n assert(op<=oend);\n length = (size_t)(oend-op);\n }\n } else {\n /* We must be on the last sequence (or invalid) because of the parsing limitations\n * so check that we exactly consume the input and don't overrun the output buffer.\n */\n if ((ip+length != iend) || (cpy > oend)) {\n DEBUGLOG(5, \"should have been last run of literals\")\n DEBUGLOG(5, \"ip(%p) + length(%i) = %p != iend (%p)\", ip, (int)length, ip+length, iend);\n DEBUGLOG(5, \"or cpy(%p) > (oend-MFLIMIT)(%p)\", cpy, oend-MFLIMIT);\n DEBUGLOG(5, \"after writing %u bytes / %i bytes available\", (unsigned)(op-(BYTE*)dst), outputSize);\n goto _output_error;\n }\n }\n LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */\n ip += length;\n op += length;\n /* Necessarily EOF when !partialDecoding.\n * When partialDecoding, it is EOF if we've either\n * filled the output buffer or\n * can't proceed with reading an offset for following match.\n */\n if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {\n break;\n }\n } else {\n LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */\n ip += length; op = cpy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n match = op - offset;\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n _copy_match:\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */\n }\n length += MINMATCH;\n\n#if LZ4_FAST_DEC_LOOP\n safe_match_copy:\n#endif\n if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) length = MIN(length, (size_t)(oend-op));\n else goto _output_error; /* doesn't respect parsing restriction */\n }\n\n if (length <= (size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) *op++ = *copyFrom++;\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n assert(match >= lowPrefix);\n\n /* copy match within block */\n cpy = op + length;\n\n /* partialDecoding : may end anywhere within the block */\n assert(op<=oend);\n if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n size_t const mlen = MIN(length, 
(size_t)(oend-op));\n const BYTE* const matchEnd = match + mlen;\n BYTE* const copyEnd = op + mlen;\n if (matchEnd > op) { /* overlap copy */\n while (op < copyEnd) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, mlen);\n }\n op = copyEnd;\n if (op == oend) { break; }\n continue;\n }\n\n if (unlikely(offset<8)) {\n LZ4_write32(op, 0); /* silence msan warning when offset==0 */\n op[0] = match[0];\n op[1] = match[1];\n op[2] = match[2];\n op[3] = match[3];\n match += inc32table[offset];\n LZ4_memcpy(op+4, match, 4);\n match -= dec64table[offset];\n } else {\n LZ4_memcpy(op, match, 8);\n match += 8;\n }\n op += 8;\n\n if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);\n if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */\n if (op < oCopyLimit) {\n LZ4_wildCopy8(op, match, oCopyLimit);\n match += oCopyLimit - op;\n op = oCopyLimit;\n }\n while (op < cpy) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, 8);\n if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }\n }\n op = cpy; /* wildcopy correction */\n }\n\n /* end of decoding */\n DEBUGLOG(5, \"decoded %i bytes\", (int) (((char*)op)-dst));\n return (int) (((char*)op)-dst); /* Nb of output bytes decoded */\n\n /* Overflow error detected */\n _output_error:\n return (int) (-(((const char*)ip)-src))-1;\n }\n}\n\n\n/*===== Instantiate the API decoding functions. 
=====*/\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,\n decode_full_block, noDict,\n (BYTE*)dest, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,\n partial_decode,\n noDict, (BYTE*)dst, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_fast(const char* source, char* dest, int originalSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_fast\");\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, NULL, 0);\n}\n\n/*===== Instantiate a few more decoding cases, used more than once. =====*/\n\nLZ4_FORCE_O2 /* Exported, an obsolete API function. */\nint LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\n/* Another obsolete API function, paired with the previous one. 
*/\nint LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,\n size_t prefixSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_safe_forceExtDict\");\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,\n const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, (const BYTE*)dictStart, dictSize);\n}\n\n/* The \"double 
dictionary\" mode, for use with e.g. ring buffers: the first part\n * of the dictionary is passed as prefix, and the second via dictStart + dictSize.\n * These routines are used only once, in LZ4_decompress_*_continue().\n */\nLZ4_FORCE_INLINE\nint LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize, const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);\n}\n\n/*===== streaming decompression functions =====*/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_streamDecode_t* LZ4_createStreamDecode(void)\n{\n LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));\n return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));\n}\n\nint LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)\n{\n if (LZ4_stream == NULL) { return 0; } /* support free on NULL */\n FREEMEM(LZ4_stream);\n return 0;\n}\n#endif\n\n/*! LZ4_setStreamDecode() :\n * Use this function to instruct where to find the dictionary.\n * This function is not necessary if previous data is still available where it was decoded.\n * Loading a size of 0 is allowed (same effect as no dictionary).\n * @return : 1 if OK, 0 if error\n */\nint LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n lz4sd->prefixSize = (size_t)dictSize;\n if (dictSize) {\n assert(dictionary != NULL);\n lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;\n } else {\n lz4sd->prefixEnd = (const BYTE*) dictionary;\n }\n lz4sd->externalDict = NULL;\n lz4sd->extDictSize = 0;\n return 1;\n}\n\n/*! 
LZ4_decoderRingBufferSize() :\n * when setting a ring buffer for streaming decompression (optional scenario),\n * provides the minimum size of this ring buffer\n * to be compatible with any source respecting maxBlockSize condition.\n * Note : in a ring buffer scenario,\n * blocks are presumed decompressed next to each other.\n * When not enough space remains for next block (remainingSize < maxBlockSize),\n * decoding resumes from beginning of ring buffer.\n * @return : minimum ring buffer size,\n * or 0 if there is an error (invalid maxBlockSize).\n */\nint LZ4_decoderRingBufferSize(int maxBlockSize)\n{\n if (maxBlockSize < 0) return 0;\n if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;\n if (maxBlockSize < 16) maxBlockSize = 16;\n return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);\n}\n\n/*\n*_continue() :\n These decoding functions allow decompression of multiple blocks in \"streaming\" mode.\n Previously decoded blocks must still be available at the memory position where they were decoded.\n If it's not possible, save the relevant part of decoded data into a safe buffer,\n and indicate where it stands using LZ4_setStreamDecode()\n*/\nLZ4_FORCE_O2\nint LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n int result;\n\n if (lz4sd->prefixSize == 0) {\n /* The first call, no dictionary yet. */\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n /* They're rolling the current segment. 
*/\n if (lz4sd->prefixSize >= 64 KB - 1)\n result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n else if (lz4sd->extDictSize == 0)\n result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize);\n else\n result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize += (size_t)result;\n lz4sd->prefixEnd += result;\n } else {\n /* The buffer wraps around, or they're switching to another buffer. */\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n }\n\n return result;\n}\n\nLZ4_FORCE_O2 int\nLZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,\n const char* source, char* dest, int originalSize)\n{\n LZ4_streamDecode_t_internal* const lz4sd =\n (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);\n int result;\n\n DEBUGLOG(5, \"LZ4_decompress_fast_continue (toDecodeSize=%i)\", originalSize);\n assert(originalSize >= 0);\n\n if (lz4sd->prefixSize == 0) {\n DEBUGLOG(5, \"first invocation : no prefix nor extDict\");\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_fast(source, dest, originalSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n DEBUGLOG(5, \"continue using existing prefix\");\n result = LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n lz4sd->prefixSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return 
result;\n lz4sd->prefixSize += (size_t)originalSize;\n lz4sd->prefixEnd += originalSize;\n } else {\n DEBUGLOG(5, \"prefix becomes extDict\");\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_fast_extDict(source, dest, originalSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n }\n\n return result;\n}\n\n\n/*\nAdvanced decoding functions :\n*_usingDict() :\n These decoding functions work the same as \"_continue\" ones,\n the dictionary must be explicitly provided within parameters\n*/\n\nint LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, 
dstCapacity, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0 || dictStart+dictSize == dest)\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n (size_t)dictSize, NULL, 0);\n assert(dictSize >= 0);\n return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);\n}\n\n\n/*=*************************************************\n* Obsolete Functions\n***************************************************/\n/* obsolete compression functions */\nint LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)\n{\n return LZ4_compress_default(source, dest, inputSize, maxOutputSize);\n}\nint LZ4_compress(const char* src, char* dest, int srcSize)\n{\n return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));\n}\nint LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);\n}\nint LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);\n}\nint LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);\n}\nint LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)\n{\n return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);\n}\n\n/*\nThese decompression functions are deprecated and should no longer be 
used.\nThey are only provided here for compatibility with older user programs.\n- LZ4_uncompress is totally equivalent to LZ4_decompress_fast\n- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe\n*/\nint LZ4_uncompress (const char* source, char* dest, int outputSize)\n{\n return LZ4_decompress_fast(source, dest, outputSize);\n}\nint LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)\n{\n return LZ4_decompress_safe(source, dest, isize, maxOutputSize);\n}\n\n/* Obsolete Streaming functions */\n\nint LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }\n\nint LZ4_resetStreamState(void* state, char* inputBuffer)\n{\n (void)inputBuffer;\n LZ4_resetStream((LZ4_stream_t*)state);\n return 0;\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nvoid* LZ4_create (char* inputBuffer)\n{\n (void)inputBuffer;\n return LZ4_createStream();\n}\n#endif\n\nchar* LZ4_slideInputBuffer (void* state)\n{\n /* avoid const char * -> char * conversion warning */\n return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;\n}\n\n#endif /* LZ4_COMMONDEFS_ONLY */\n"], ["/pogocache/src/hashmap.c", "// Copyright 2020 Joshua J Baker. All rights reserved.\n// Use of this source code is governed by an MIT-style\n// license that can be found in the LICENSE file.\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\n#define GROW_AT 0.60 /* 60% */\n#define SHRINK_AT 0.10 /* 10% */\n\n#ifndef HASHMAP_LOAD_FACTOR\n#define HASHMAP_LOAD_FACTOR GROW_AT\n#endif\n\nstatic void *(*__malloc)(size_t) = NULL;\nstatic void *(*__realloc)(void *, size_t) = NULL;\nstatic void (*__free)(void *) = NULL;\n\n// hashmap_set_allocator allows for configuring a custom allocator for\n// all hashmap library operations. 
This function, if needed, should be called\n// only once at startup and a prior to calling hashmap_new().\nvoid hashmap_set_allocator(void *(*malloc)(size_t), void (*free)(void*)) {\n __malloc = malloc;\n __free = free;\n}\n\nstruct bucket {\n uint64_t hash:48;\n uint64_t dib:16;\n};\n\n// hashmap is an open addressed hash map using robinhood hashing.\nstruct hashmap {\n void *(*malloc)(size_t);\n void *(*realloc)(void *, size_t);\n void (*free)(void *);\n size_t elsize;\n size_t cap;\n uint64_t seed0;\n uint64_t seed1;\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1);\n int (*compare)(const void *a, const void *b, void *udata);\n void (*elfree)(void *item);\n void *udata;\n size_t bucketsz;\n size_t nbuckets;\n size_t count;\n size_t mask;\n size_t growat;\n size_t shrinkat;\n uint8_t loadfactor;\n uint8_t growpower;\n bool oom;\n void *buckets;\n void *spare;\n void *edata;\n};\n\nvoid hashmap_set_grow_by_power(struct hashmap *map, size_t power) {\n map->growpower = power < 1 ? 1 : power > 16 ? 16 : power;\n}\n\nstatic double clamp_load_factor(double factor, double default_factor) {\n // Check for NaN and clamp between 50% and 90%\n return factor != factor ? default_factor : \n factor < 0.50 ? 0.50 : \n factor > 0.95 ? 
0.95 : \n factor;\n}\n\nvoid hashmap_set_load_factor(struct hashmap *map, double factor) {\n factor = clamp_load_factor(factor, map->loadfactor / 100.0);\n map->loadfactor = factor * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n}\n\nstatic struct bucket *bucket_at0(void *buckets, size_t bucketsz, size_t i) {\n return (struct bucket*)(((char*)buckets)+(bucketsz*i));\n}\n\nstatic struct bucket *bucket_at(struct hashmap *map, size_t index) {\n return bucket_at0(map->buckets, map->bucketsz, index);\n}\n\nstatic void *bucket_item(struct bucket *entry) {\n return ((char*)entry)+sizeof(struct bucket);\n}\n\nstatic uint64_t clip_hash(uint64_t hash) {\n return hash & 0xFFFFFFFFFFFF;\n}\n\nstatic uint64_t get_hash(struct hashmap *map, const void *key) {\n return clip_hash(map->hash(key, map->seed0, map->seed1));\n}\n\n\n// hashmap_new_with_allocator returns a new hash map using a custom allocator.\n// See hashmap_new for more information information\nstruct hashmap *hashmap_new_with_allocator(void *(*_malloc)(size_t), \n void *(*_realloc)(void*, size_t), void (*_free)(void*),\n size_t elsize, size_t cap, uint64_t seed0, uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n _malloc = _malloc ? _malloc : __malloc ? __malloc : malloc;\n _realloc = _realloc ? _realloc : __realloc ? __realloc : realloc;\n _free = _free ? _free : __free ? 
__free : free;\n size_t ncap = 16;\n if (cap < ncap) {\n cap = ncap;\n } else {\n while (ncap < cap) {\n ncap *= 2;\n }\n cap = ncap;\n }\n size_t bucketsz = sizeof(struct bucket) + elsize;\n while (bucketsz & (sizeof(uintptr_t)-1)) {\n bucketsz++;\n }\n // hashmap + spare + edata\n size_t size = sizeof(struct hashmap)+bucketsz*2;\n struct hashmap *map = _malloc(size);\n if (!map) {\n return NULL;\n }\n memset(map, 0, sizeof(struct hashmap));\n map->elsize = elsize;\n map->bucketsz = bucketsz;\n map->seed0 = seed0;\n map->seed1 = seed1;\n map->hash = hash;\n map->compare = compare;\n map->elfree = elfree;\n map->udata = udata;\n map->spare = ((char*)map)+sizeof(struct hashmap);\n map->edata = (char*)map->spare+bucketsz;\n map->cap = cap;\n map->nbuckets = cap;\n map->mask = map->nbuckets-1;\n map->buckets = _malloc(map->bucketsz*map->nbuckets);\n if (!map->buckets) {\n _free(map);\n return NULL;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->growpower = 1;\n map->loadfactor = clamp_load_factor(HASHMAP_LOAD_FACTOR, GROW_AT) * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n map->shrinkat = map->nbuckets * SHRINK_AT;\n map->malloc = _malloc;\n map->realloc = _realloc;\n map->free = _free;\n return map; \n}\n\n// hashmap_new returns a new hash map. \n// Param `elsize` is the size of each element in the tree. Every element that\n// is inserted, deleted, or retrieved will be this size.\n// Param `cap` is the default lower capacity of the hashmap. Setting this to\n// zero will default to 16.\n// Params `seed0` and `seed1` are optional seed values that are passed to the \n// following `hash` function. These can be any value you wish but it's often \n// best to use randomly generated values.\n// Param `hash` is a function that generates a hash value for an item. It's\n// important that you provide a good hash function, otherwise it will perform\n// poorly or be vulnerable to Denial-of-service attacks. 
This implementation\n// comes with two helper functions `hashmap_sip()` and `hashmap_murmur()`.\n// Param `compare` is a function that compares items in the tree. See the \n// qsort stdlib function for an example of how this function works.\n// The hashmap must be freed with hashmap_free(). \n// Param `elfree` is a function that frees a specific item. This should be NULL\n// unless you're storing some kind of reference data in the hash.\nstruct hashmap *hashmap_new(size_t elsize, size_t cap, uint64_t seed0, \n uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n return hashmap_new_with_allocator(NULL, NULL, NULL, elsize, cap, seed0, \n seed1, hash, compare, elfree, udata);\n}\n\nstatic void free_elements(struct hashmap *map) {\n if (map->elfree) {\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib) map->elfree(bucket_item(bucket));\n }\n }\n}\n\n// hashmap_clear quickly clears the map. \n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\n// When the update_cap is provided, the map's capacity will be updated to match\n// the currently number of allocated buckets. 
This is an optimization to ensure\n// that this operation does not perform any allocations.\nvoid hashmap_clear(struct hashmap *map, bool update_cap) {\n map->count = 0;\n free_elements(map);\n if (update_cap) {\n map->cap = map->nbuckets;\n } else if (map->nbuckets != map->cap) {\n void *new_buckets = map->malloc(map->bucketsz*map->cap);\n if (new_buckets) {\n map->free(map->buckets);\n map->buckets = new_buckets;\n }\n map->nbuckets = map->cap;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * (map->loadfactor / 100.0) ;\n map->shrinkat = map->nbuckets * SHRINK_AT;\n}\n\nstatic bool resize0(struct hashmap *map, size_t new_cap) {\n struct hashmap *map2 = hashmap_new_with_allocator(map->malloc, map->realloc, \n map->free, map->elsize, new_cap, map->seed0, map->seed1, map->hash, \n map->compare, map->elfree, map->udata);\n if (!map2) return false;\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *entry = bucket_at(map, i);\n if (!entry->dib) {\n continue;\n }\n entry->dib = 1;\n size_t j = entry->hash & map2->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map2, j);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n break;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map2->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map2->spare, map->bucketsz);\n }\n j = (j + 1) & map2->mask;\n entry->dib += 1;\n }\n }\n map->free(map->buckets);\n map->buckets = map2->buckets;\n map->nbuckets = map2->nbuckets;\n map->mask = map2->mask;\n map->growat = map2->growat;\n map->shrinkat = map2->shrinkat;\n map->free(map2);\n return true;\n}\n\nstatic bool resize(struct hashmap *map, size_t new_cap) {\n return resize0(map, new_cap);\n}\n\n// hashmap_set_with_hash works like hashmap_set but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_set_with_hash(struct hashmap *map, const void *item,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*(1<growpower))) {\n map->oom = true;\n return NULL;\n }\n }\n\n struct bucket *entry = map->edata;\n entry->hash = hash;\n entry->dib = 1;\n void *eitem = bucket_item(entry);\n memcpy(eitem, item, map->elsize);\n\n void *bitem;\n size_t i = entry->hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n map->count++;\n return NULL;\n }\n bitem = bucket_item(bucket);\n if (entry->hash == bucket->hash && (!map->compare ||\n map->compare(eitem, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n memcpy(bitem, eitem, map->elsize);\n return map->spare;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map->spare, map->bucketsz);\n eitem = bucket_item(entry);\n }\n i = (i + 1) & map->mask;\n entry->dib += 1;\n }\n}\n\n// hashmap_set inserts or replaces an item in the hash map. If an item is\n// replaced then it is returned otherwise NULL is returned. This operation\n// may allocate memory. If the system is unable to allocate additional\n// memory then NULL is returned and hashmap_oom() returns true.\nconst void *hashmap_set(struct hashmap *map, const void *item) {\n return hashmap_set_with_hash(map, item, get_hash(map, item));\n}\n\n// hashmap_get_with_hash works like hashmap_get but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_get_with_hash(struct hashmap *map, const void *key, \n uint64_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) return NULL;\n if (bucket->hash == hash) {\n void *bitem = bucket_item(bucket);\n if (!map->compare || map->compare(key, bitem, map->udata) == 0) {\n return bitem;\n }\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_get returns the item based on the provided key. If the item is not\n// found then NULL is returned.\nconst void *hashmap_get(struct hashmap *map, const void *key) {\n return hashmap_get_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_probe returns the item in the bucket at position or NULL if an item\n// is not set for that bucket. The position is 'moduloed' by the number of \n// buckets in the hashmap.\nconst void *hashmap_probe(struct hashmap *map, uint64_t position) {\n size_t i = position & map->mask;\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n return bucket_item(bucket);\n}\n\n// hashmap_delete_with_hash works like hashmap_delete but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_delete_with_hash(struct hashmap *map, const void *key,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n void *bitem = bucket_item(bucket);\n if (bucket->hash == hash && (!map->compare ||\n map->compare(key, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n bucket->dib = 0;\n while(1) {\n struct bucket *prev = bucket;\n i = (i + 1) & map->mask;\n bucket = bucket_at(map, i);\n if (bucket->dib <= 1) {\n prev->dib = 0;\n break;\n }\n memcpy(prev, bucket, map->bucketsz);\n prev->dib--;\n }\n map->count--;\n if (map->nbuckets > map->cap && map->count <= map->shrinkat) {\n // Ignore the return value. It's ok for the resize operation to\n // fail to allocate enough memory because a shrink operation\n // does not change the integrity of the data.\n resize(map, map->nbuckets/2);\n }\n return map->spare;\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_delete removes an item from the hash map and returns it. 
If the\n// item is not found then NULL is returned.\nconst void *hashmap_delete(struct hashmap *map, const void *key) {\n return hashmap_delete_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_count returns the number of items in the hash map.\nsize_t hashmap_count(struct hashmap *map) {\n return map->count;\n}\n\n// hashmap_free frees the hash map\n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\nvoid hashmap_free(struct hashmap *map) {\n if (!map) return;\n free_elements(map);\n map->free(map->buckets);\n map->free(map);\n}\n\n// hashmap_oom returns true if the last hashmap_set() call failed due to the \n// system being out of memory.\nbool hashmap_oom(struct hashmap *map) {\n return map->oom;\n}\n\n// hashmap_scan iterates over all items in the hash map\n// Param `iter` can return false to stop iteration early.\n// Returns false if the iteration has been stopped early.\nbool hashmap_scan(struct hashmap *map, \n bool (*iter)(const void *item, void *udata), void *udata)\n{\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib && !iter(bucket_item(bucket), udata)) {\n return false;\n }\n }\n return true;\n}\n\n// hashmap_iter iterates one key at a time yielding a reference to an\n// entry at each iteration. Useful to write simple loops and avoid writing\n// dedicated callbacks and udata structures, as in hashmap_scan.\n//\n// map is a hash map handle. i is a pointer to a size_t cursor that\n// should be initialized to 0 at the beginning of the loop. item is a void\n// pointer pointer that is populated with the retrieved item. 
Note that this\n// is NOT a copy of the item stored in the hash map and can be directly\n// modified.\n//\n// Note that if hashmap_delete() is called on the hashmap being iterated,\n// the buckets are rearranged and the iterator must be reset to 0, otherwise\n// unexpected results may be returned after deletion.\n//\n// This function has not been tested for thread safety.\n//\n// The function returns true if an item was retrieved; false if the end of the\n// iteration has been reached.\nbool hashmap_iter(struct hashmap *map, size_t *i, void **item) {\n struct bucket *bucket;\n do {\n if (*i >= map->nbuckets) return false;\n bucket = bucket_at(map, *i);\n (*i)++;\n } while (!bucket->dib);\n *item = bucket_item(bucket);\n return true;\n}\n\n\n//-----------------------------------------------------------------------------\n// SipHash reference C implementation\n//\n// Copyright (c) 2012-2016 Jean-Philippe Aumasson\n// \n// Copyright (c) 2012-2014 Daniel J. Bernstein \n//\n// To the extent possible under law, the author(s) have dedicated all copyright\n// and related and neighboring rights to this software to the public domain\n// worldwide. This software is distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along\n// with this software. 
If not, see\n// .\n//\n// default: SipHash-2-4\n//-----------------------------------------------------------------------------\nstatic uint64_t SIP64(const uint8_t *in, const size_t inlen, uint64_t seed0,\n uint64_t seed1) \n{\n#define U8TO64_LE(p) \\\n { (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \\\n ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \\\n ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \\\n ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) }\n#define U64TO8_LE(p, v) \\\n { U32TO8_LE((p), (uint32_t)((v))); \\\n U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); }\n#define U32TO8_LE(p, v) \\\n { (p)[0] = (uint8_t)((v)); \\\n (p)[1] = (uint8_t)((v) >> 8); \\\n (p)[2] = (uint8_t)((v) >> 16); \\\n (p)[3] = (uint8_t)((v) >> 24); }\n#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))\n#define SIPROUND \\\n { v0 += v1; v1 = ROTL(v1, 13); \\\n v1 ^= v0; v0 = ROTL(v0, 32); \\\n v2 += v3; v3 = ROTL(v3, 16); \\\n v3 ^= v2; \\\n v0 += v3; v3 = ROTL(v3, 21); \\\n v3 ^= v0; \\\n v2 += v1; v1 = ROTL(v1, 17); \\\n v1 ^= v2; v2 = ROTL(v2, 32); }\n uint64_t k0 = U8TO64_LE((uint8_t*)&seed0);\n uint64_t k1 = U8TO64_LE((uint8_t*)&seed1);\n uint64_t v3 = UINT64_C(0x7465646279746573) ^ k1;\n uint64_t v2 = UINT64_C(0x6c7967656e657261) ^ k0;\n uint64_t v1 = UINT64_C(0x646f72616e646f6d) ^ k1;\n uint64_t v0 = UINT64_C(0x736f6d6570736575) ^ k0;\n const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));\n for (; in != end; in += 8) {\n uint64_t m = U8TO64_LE(in);\n v3 ^= m;\n SIPROUND; SIPROUND;\n v0 ^= m;\n }\n const int left = inlen & 7;\n uint64_t b = ((uint64_t)inlen) << 56;\n switch (left) {\n case 7: b |= ((uint64_t)in[6]) << 48; /* fall through */\n case 6: b |= ((uint64_t)in[5]) << 40; /* fall through */\n case 5: b |= ((uint64_t)in[4]) << 32; /* fall through */\n case 4: b |= ((uint64_t)in[3]) << 24; /* fall through */\n case 3: b |= ((uint64_t)in[2]) << 16; /* fall through */\n case 2: b |= ((uint64_t)in[1]) << 8; /* fall 
through */\n case 1: b |= ((uint64_t)in[0]); break;\n case 0: break;\n }\n v3 ^= b;\n SIPROUND; SIPROUND;\n v0 ^= b;\n v2 ^= 0xff;\n SIPROUND; SIPROUND; SIPROUND; SIPROUND;\n b = v0 ^ v1 ^ v2 ^ v3;\n uint64_t out = 0;\n U64TO8_LE((uint8_t*)&out, b);\n return out;\n}\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n//\n// Murmur3_86_128\n//-----------------------------------------------------------------------------\nstatic uint64_t MM86128(const void *key, const int len, uint32_t seed) {\n#define\tROTL32(x, r) ((x << r) | (x >> (32 - r)))\n#define FMIX32(h) h^=h>>16; h*=0x85ebca6b; h^=h>>13; h*=0xc2b2ae35; h^=h>>16;\n const uint8_t * data = (const uint8_t*)key;\n const int nblocks = len / 16;\n uint32_t h1 = seed;\n uint32_t h2 = seed;\n uint32_t h3 = seed;\n uint32_t h4 = seed;\n uint32_t c1 = 0x239b961b; \n uint32_t c2 = 0xab0e9789;\n uint32_t c3 = 0x38b34ae5; \n uint32_t c4 = 0xa1e38b93;\n const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n for (int i = -nblocks; i; i++) {\n uint32_t k1 = blocks[i*4+0];\n uint32_t k2 = blocks[i*4+1];\n uint32_t k3 = blocks[i*4+2];\n uint32_t k4 = blocks[i*4+3];\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n }\n const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n uint32_t k1 = 0;\n uint32_t k2 = 0;\n uint32_t k3 = 0;\n uint32_t k4 = 0;\n switch(len & 15) {\n case 15: k4 ^= tail[14] << 16; /* fall through */\n case 14: k4 ^= tail[13] << 8; /* fall through 
*/\n case 13: k4 ^= tail[12] << 0;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n /* fall through */\n case 12: k3 ^= tail[11] << 24; /* fall through */\n case 11: k3 ^= tail[10] << 16; /* fall through */\n case 10: k3 ^= tail[ 9] << 8; /* fall through */\n case 9: k3 ^= tail[ 8] << 0;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n /* fall through */\n case 8: k2 ^= tail[ 7] << 24; /* fall through */\n case 7: k2 ^= tail[ 6] << 16; /* fall through */\n case 6: k2 ^= tail[ 5] << 8; /* fall through */\n case 5: k2 ^= tail[ 4] << 0;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n /* fall through */\n case 4: k1 ^= tail[ 3] << 24; /* fall through */\n case 3: k1 ^= tail[ 2] << 16; /* fall through */\n case 2: k1 ^= tail[ 1] << 8; /* fall through */\n case 1: k1 ^= tail[ 0] << 0;\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n /* fall through */\n };\n h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n FMIX32(h1); FMIX32(h2); FMIX32(h3); FMIX32(h4);\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n return (((uint64_t)h2)<<32)|h1;\n}\n\n//-----------------------------------------------------------------------------\n// xxHash Library\n// Copyright (c) 2012-2021 Yann Collet\n// All rights reserved.\n// \n// BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)\n//\n// xxHash3\n//-----------------------------------------------------------------------------\n#define XXH_PRIME_1 11400714785074694791ULL\n#define XXH_PRIME_2 14029467366897019727ULL\n#define XXH_PRIME_3 1609587929392839161ULL\n#define XXH_PRIME_4 9650029242287828579ULL\n#define XXH_PRIME_5 2870177450012600261ULL\n\nstatic uint64_t XXH_read64(const void* memptr) {\n uint64_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint32_t XXH_read32(const void* memptr) {\n uint32_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint64_t XXH_rotl64(uint64_t 
x, int r) {\n return (x << r) | (x >> (64 - r));\n}\n\nstatic uint64_t xxh3(const void* data, size_t len, uint64_t seed) {\n const uint8_t* p = (const uint8_t*)data;\n const uint8_t* const end = p + len;\n uint64_t h64;\n\n if (len >= 32) {\n const uint8_t* const limit = end - 32;\n uint64_t v1 = seed + XXH_PRIME_1 + XXH_PRIME_2;\n uint64_t v2 = seed + XXH_PRIME_2;\n uint64_t v3 = seed + 0;\n uint64_t v4 = seed - XXH_PRIME_1;\n\n do {\n v1 += XXH_read64(p) * XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n\n v2 += XXH_read64(p + 8) * XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n\n v3 += XXH_read64(p + 16) * XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n\n v4 += XXH_read64(p + 24) * XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n\n p += 32;\n } while (p <= limit);\n\n h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + \n XXH_rotl64(v4, 18);\n\n v1 *= XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n h64 ^= v1;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v2 *= XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n h64 ^= v2;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v3 *= XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n h64 ^= v3;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v4 *= XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n h64 ^= v4;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n }\n else {\n h64 = seed + XXH_PRIME_5;\n }\n\n h64 += (uint64_t)len;\n\n while (p + 8 <= end) {\n uint64_t k1 = XXH_read64(p);\n k1 *= XXH_PRIME_2;\n k1 = XXH_rotl64(k1, 31);\n k1 *= XXH_PRIME_1;\n h64 ^= k1;\n h64 = XXH_rotl64(h64, 27) * XXH_PRIME_1 + XXH_PRIME_4;\n p += 8;\n }\n\n if (p + 4 <= end) {\n h64 ^= (uint64_t)(XXH_read32(p)) * XXH_PRIME_1;\n h64 = XXH_rotl64(h64, 23) * XXH_PRIME_2 + XXH_PRIME_3;\n p += 4;\n }\n\n while (p < end) {\n h64 ^= (*p) * XXH_PRIME_5;\n h64 = XXH_rotl64(h64, 11) * XXH_PRIME_1;\n p++;\n }\n\n h64 ^= h64 >> 33;\n h64 *= 
XXH_PRIME_2;\n h64 ^= h64 >> 29;\n h64 *= XXH_PRIME_3;\n h64 ^= h64 >> 32;\n\n return h64;\n}\n\n// hashmap_sip returns a hash value for `data` using SipHash-2-4.\nuint64_t hashmap_sip(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n return SIP64((uint8_t*)data, len, seed0, seed1);\n}\n\n// hashmap_murmur returns a hash value for `data` using Murmur3_86_128.\nuint64_t hashmap_murmur(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return MM86128(data, len, seed0);\n}\n\nuint64_t hashmap_xxhash3(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return xxh3(data, len ,seed0);\n}\n\n//==============================================================================\n// TESTS AND BENCHMARKS\n// $ cc -DHASHMAP_TEST hashmap.c && ./a.out # run tests\n// $ cc -DHASHMAP_TEST -O3 hashmap.c && BENCH=1 ./a.out # run benchmarks\n//==============================================================================\n#ifdef HASHMAP_TEST\n\nstatic size_t deepcount(struct hashmap *map) {\n size_t count = 0;\n for (size_t i = 0; i < map->nbuckets; i++) {\n if (bucket_at(map, i)->dib) {\n count++;\n }\n }\n return count;\n}\n\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wpedantic\"\n#endif\n#ifdef __clang__\n#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n#pragma GCC diagnostic ignored \"-Wcompound-token-split-by-macro\"\n#pragma GCC diagnostic ignored \"-Wgnu-statement-expression-from-macro-expansion\"\n#endif\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\nstatic bool rand_alloc_fail = false;\nstatic int rand_alloc_fail_odds = 3; // 1 in 3 chance malloc will fail.\nstatic uintptr_t total_allocs = 0;\nstatic uintptr_t total_mem = 0;\n\nstatic void *xmalloc(size_t size) {\n if (rand_alloc_fail && rand()%rand_alloc_fail_odds == 0) {\n return NULL;\n }\n void *mem = 
malloc(sizeof(uintptr_t)+size);\n assert(mem);\n *(uintptr_t*)mem = size;\n total_allocs++;\n total_mem += size;\n return (char*)mem+sizeof(uintptr_t);\n}\n\nstatic void xfree(void *ptr) {\n if (ptr) {\n total_mem -= *(uintptr_t*)((char*)ptr-sizeof(uintptr_t));\n free((char*)ptr-sizeof(uintptr_t));\n total_allocs--;\n }\n}\n\nstatic void shuffle(void *array, size_t numels, size_t elsize) {\n char tmp[elsize];\n char *arr = array;\n for (size_t i = 0; i < numels - 1; i++) {\n int j = i + rand() / (RAND_MAX / (numels - i) + 1);\n memcpy(tmp, arr + j * elsize, elsize);\n memcpy(arr + j * elsize, arr + i * elsize, elsize);\n memcpy(arr + i * elsize, tmp, elsize);\n }\n}\n\nstatic bool iter_ints(const void *item, void *udata) {\n int *vals = *(int**)udata;\n vals[*(int*)item] = 1;\n return true;\n}\n\nstatic int compare_ints_udata(const void *a, const void *b, void *udata) {\n return *(int*)a - *(int*)b;\n}\n\nstatic int compare_strs(const void *a, const void *b, void *udata) {\n return strcmp(*(char**)a, *(char**)b);\n}\n\nstatic uint64_t hash_int(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(item, sizeof(int), seed0, seed1);\n // return hashmap_sip(item, sizeof(int), seed0, seed1);\n // return hashmap_murmur(item, sizeof(int), seed0, seed1);\n}\n\nstatic uint64_t hash_str(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_sip(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_murmur(*(char**)item, strlen(*(char**)item), seed0, seed1);\n}\n\nstatic void free_str(void *item) {\n xfree(*(char**)item);\n}\n\nstatic void all(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):2000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n rand_alloc_fail = true;\n\n // test sip and murmur hashes\n assert(hashmap_sip(\"hello\", 
5, 1, 2) == 2957200328589801622);\n assert(hashmap_murmur(\"hello\", 5, 1, 2) == 1682575153221130884);\n assert(hashmap_xxhash3(\"hello\", 5, 1, 2) == 2584346877953614258);\n\n int *vals;\n while (!(vals = xmalloc(N * sizeof(int)))) {}\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n struct hashmap *map;\n\n while (!(map = hashmap_new(sizeof(int), 0, seed, seed, \n hash_int, compare_ints_udata, NULL, NULL))) {}\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n // // printf(\"== %d ==\\n\", vals[i]);\n assert(map->count == (size_t)i);\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n const int *v;\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n \n for (int j = 0; j < i; j++) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n while (true) {\n v = hashmap_set(map, &vals[i]);\n if (!v) {\n assert(hashmap_oom(map));\n continue;\n } else {\n assert(!hashmap_oom(map));\n assert(v && *v == vals[i]);\n break;\n }\n }\n v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n assert(!hashmap_set(map, &vals[i]));\n assert(map->count == (size_t)(i+1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n }\n\n int *vals2;\n while (!(vals2 = xmalloc(N * sizeof(int)))) {}\n memset(vals2, 0, N * sizeof(int));\n assert(hashmap_scan(map, iter_ints, &vals2));\n\n // Test hashmap_iter. 
This does the same as hashmap_scan above.\n size_t iter = 0;\n void *iter_val;\n while (hashmap_iter (map, &iter, &iter_val)) {\n assert (iter_ints(iter_val, &vals2));\n }\n for (int i = 0; i < N; i++) {\n assert(vals2[i] == 1);\n }\n xfree(vals2);\n\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n const int *v;\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(map->count == (size_t)(N-i-1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n for (int j = N-1; j > i; j--) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n }\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n assert(map->count != 0);\n size_t prev_cap = map->cap;\n hashmap_clear(map, true);\n assert(prev_cap < map->cap);\n assert(map->count == 0);\n\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n prev_cap = map->cap;\n hashmap_clear(map, false);\n assert(prev_cap == map->cap);\n\n hashmap_free(map);\n\n xfree(vals);\n\n\n while (!(map = hashmap_new(sizeof(char*), 0, seed, seed,\n hash_str, compare_strs, free_str, NULL)));\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_clear(map, false);\n assert(hashmap_count(map) == 0);\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_free(map);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n#define bench(name, N, code) {{ \\\n if (strlen(name) > 0) { \\\n printf(\"%-14s \", name); \\\n } \\\n size_t tmem = total_mem; \\\n size_t tallocs = 
total_allocs; \\\n uint64_t bytes = 0; \\\n clock_t begin = clock(); \\\n for (int i = 0; i < N; i++) { \\\n (code); \\\n } \\\n clock_t end = clock(); \\\n double elapsed_secs = (double)(end - begin) / CLOCKS_PER_SEC; \\\n double bytes_sec = (double)bytes/elapsed_secs; \\\n printf(\"%d ops in %.3f secs, %.0f ns/op, %.0f op/sec\", \\\n N, elapsed_secs, \\\n elapsed_secs/(double)N*1e9, \\\n (double)N/elapsed_secs \\\n ); \\\n if (bytes > 0) { \\\n printf(\", %.1f GB/sec\", bytes_sec/1024/1024/1024); \\\n } \\\n if (total_mem > tmem) { \\\n size_t used_mem = total_mem-tmem; \\\n printf(\", %.2f bytes/op\", (double)used_mem/N); \\\n } \\\n if (total_allocs > tallocs) { \\\n size_t used_allocs = total_allocs-tallocs; \\\n printf(\", %.2f allocs/op\", (double)used_allocs/N); \\\n } \\\n printf(\"\\n\"); \\\n}}\n\nstatic void benchmarks(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):5000000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n\n int *vals = xmalloc(N * sizeof(int));\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n shuffle(vals, N, sizeof(int));\n\n struct hashmap *map;\n shuffle(vals, N, sizeof(int));\n\n map = hashmap_new(sizeof(int), 0, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get\", N, {\n const int *v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete\", N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n hashmap_free(map);\n\n map = hashmap_new(sizeof(int), N, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set (cap)\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get (cap)\", N, {\n const int *v = 
hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete (cap)\" , N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n\n hashmap_free(map);\n\n \n xfree(vals);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\nint main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}\n\n\n#endif\n\n\n"], ["/pogocache/src/util.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit util.c provides various utilities and convenience functions.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n\n// Performs a case-insenstive equality test between the byte slice 'data' and\n// a c-string. It's expected that c-string is already lowercase and \n// null-terminated. 
The data does not need to be null-terminated.\nbool argeq_bytes(const void *data, size_t datalen, const char *cstr) {\n const char *p = data;\n const char *e = p+datalen;\n bool eq = true;\n while (eq && p < e && *cstr) {\n eq = tolower(*p) == *cstr;\n p++;\n cstr++;\n }\n return eq && *cstr == '\\0' && p == e;\n}\n\nbool argeq(struct args *args, int idx, const char *cstr) {\n return argeq_bytes(args->bufs[idx].data, args->bufs[idx].len, cstr);\n}\n\n// Safely adds two int64_t values and with clamping on overflow.\nint64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n// Safely multiplies two int64_t values and with clamping on overflow.\nint64_t int64_mul_clamp(int64_t a, int64_t b) {\n if (a || b) {\n if (a > 0) {\n if (b > 0 && a > INT64_MAX / b) {\n return INT64_MAX;\n } else if (b < 0 && b < INT64_MIN / a) {\n return INT64_MIN;\n }\n } else {\n if (b > 0 && a < INT64_MIN / b) {\n return INT64_MIN;\n } else if (b < 0 && a < INT64_MAX / b) {\n return INT64_MAX;\n }\n }\n }\n return a * b;\n}\n\n/// https://github.com/tidwall/varint.c\nint varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nint varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\nint varint_write_i64(void *data, int64_t x) {\n uint64_t ux = (uint64_t)x << 1;\n ux = x < 0 ? 
~ux : ux;\n return varint_write_u64(data, ux);\n}\n\nint varint_read_i64(const void *data, size_t len, int64_t *x) {\n uint64_t ux;\n int n = varint_read_u64(data, len, &ux);\n *x = (int64_t)(ux >> 1);\n *x = ux&1 ? ~*x : *x;\n return n;\n}\n\n\nconst char *memstr(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0fB\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fK\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fM\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1fG\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0G\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0M\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0K\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\nconst char *memstr_long(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0f bytes\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f KB\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f MB\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1f GB\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0 GB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 MB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 KB\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nuint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\nuint64_t rand_next(uint64_t *seed) {\n // pcg + mix13\n *seed = (*seed * UINT64_C(6364136223846793005)) + 1;\n return mix13(*seed);\n}\n\nvoid write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = 
(x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n bytes[4] = (x>>32)&0xFF;\n bytes[5] = (x>>40)&0xFF;\n bytes[6] = (x>>48)&0xFF;\n bytes[7] = (x>>56)&0xFF;\n}\n\nuint64_t read_u64(const void *data) {\n const uint8_t *bytes = data;\n uint64_t x = 0;\n x |= ((uint64_t)bytes[0])<<0;\n x |= ((uint64_t)bytes[1])<<8;\n x |= ((uint64_t)bytes[2])<<16;\n x |= ((uint64_t)bytes[3])<<24;\n x |= ((uint64_t)bytes[4])<<32;\n x |= ((uint64_t)bytes[5])<<40;\n x |= ((uint64_t)bytes[6])<<48;\n x |= ((uint64_t)bytes[7])<<56;\n return x;\n}\n\nvoid write_u32(void *data, uint32_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n}\n\nuint32_t read_u32(const void *data) {\n const uint8_t *bytes = data;\n uint32_t x = 0;\n x |= ((uint32_t)bytes[0])<<0;\n x |= ((uint32_t)bytes[1])<<8;\n x |= ((uint32_t)bytes[2])<<16;\n x |= ((uint32_t)bytes[3])<<24;\n return x;\n}\n\n// https://www.w3.org/TR/2003/REC-PNG-20031110/#D-CRCAppendix\nuint32_t crc32(const void *data, size_t len) {\n static __thread uint32_t table[256];\n static __thread bool computed = false;\n if (!computed) {\n for (uint32_t n = 0; n < 256; n++) {\n uint32_t c = n;\n for (int k = 0; k < 8; k++) {\n c = (c&1)?0xedb88320L^(c>>1):c>>1;\n }\n table[n] = c;\n }\n computed = true;\n }\n uint32_t crc = ~0;\n const uint8_t *buf = data;\n for (size_t n = 0; n < len; n++) {\n crc = table[(crc^buf[n])&0xff]^(crc>>8);\n }\n return ~crc;\n}\n\n// Attempts to read exactly len bytes from file stream\n// Returns the number of bytes read. 
Anything less than len means the stream\n// was closed or an error occured while reading.\n// Return -1 if no bytes were read and there was an error.\nssize_t read_full(int fd, void *data, size_t len) {\n uint8_t *bytes = data;\n size_t total = 0;\n while (len > 0) {\n ssize_t n = read(fd, bytes+total, len);\n if (n <= 0) {\n if (total > 0) {\n break;\n }\n return n;\n }\n len -= n;\n total += n;\n }\n return total;\n}\n\nsize_t u64toa(uint64_t x, uint8_t *data) {\n if (x < 10) {\n data[0] = '0'+x;\n return 1;\n }\n size_t i = 0;\n do {\n data[i++] = '0' + x % 10;\n } while ((x /= 10) > 0);\n // reverse the characters\n for (size_t j = 0, k = i-1; j < k; j++, k--) {\n uint8_t ch = data[j];\n data[j] = data[k];\n data[k] = ch;\n }\n return i;\n}\n\nsize_t i64toa(int64_t x, uint8_t *data) {\n if (x < 0) {\n data[0] = '-';\n data++;\n return u64toa(x * -1, data) + 1;\n }\n return u64toa(x, data);\n}\n\nuint32_t fnv1a_case(const char* buf, size_t len) {\n uint32_t hash = 0x811c9dc5;\n for (size_t i = 0; i < len; i++) {\n hash = (hash ^ tolower(buf[i])) * 0x01000193;\n }\n\treturn hash;\n}\n\nbool parse_i64(const char *data, size_t len, int64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n errno = 0;\n char *end;\n *x = strtoll(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool parse_u64(const char *data, size_t len, uint64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n if (buf[0] == '-') {\n return false;\n }\n errno = 0;\n char *end;\n *x = strtoull(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool argi64(struct args *args, int idx, int64_t *x) {\n return parse_i64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nbool argu64(struct args *args, int idx, uint64_t *x) {\n return parse_u64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nvoid *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t 
uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\nvoid store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// Increment a morris counter. The counter is clipped to 31 bits\nuint8_t morris_incr(uint8_t morris, uint64_t rand) {\n return morris>=31?31:morris+!(rand&((UINT64_C(1)< '~') {\n printf(\"\\\\x%02x\", c);\n } else {\n printf(\"%c\", c);\n }\n }\n}\n"], ["/pogocache/src/sys.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit sys.c provides various system-level functions.\n#if __linux__\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __APPLE__\n#include \n#include \n#endif\n#include \"sys.h\"\n\nint sys_nprocs(void) {\n static atomic_int nprocsa = 0;\n int nprocs = atomic_load_explicit(&nprocsa, __ATOMIC_RELAXED);\n if (nprocs > 0) {\n return nprocs;\n }\n int logical = sysconf(_SC_NPROCESSORS_CONF);\n logical = logical < 1 ? 1 : logical;\n int physical = logical;\n int affinity = physical;\n#ifdef __linux__\n affinity = 0;\n cpu_set_t mask;\n CPU_ZERO(&mask);\n if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {\n perror(\"sched_getaffinity\");\n return 1;\n }\n for (int i = 0; i < CPU_SETSIZE; i++) {\n if (CPU_ISSET(i, &mask)) {\n affinity++;\n }\n }\n double hyper = ceil((double)logical / (double)physical);\n hyper = hyper < 1 ? 1 : hyper;\n affinity /= hyper;\n#endif\n nprocs = affinity;\n nprocs = nprocs < 1 ? 
1 : nprocs;\n atomic_store_explicit(&nprocsa, nprocs, __ATOMIC_RELAXED);\n return nprocs;\n}\n\n#ifndef __linux__\n#include \n#endif\n\nsize_t sys_memory(void) {\n size_t sysmem = 0;\n#ifdef __linux__\n FILE *f = fopen(\"/proc/meminfo\", \"rb\");\n if (f) {\n char buf[4096];\n size_t n = fread(buf, 1, sizeof(buf)-1, f);\n buf[n] = '\\0';\n char *s = 0;\n char *e = 0;\n s = strstr(buf, \"MemTotal\");\n if (s) s = strstr(s, \": \");\n if (s) e = strstr(s, \"\\n\");\n if (e) {\n *e = '\\0';\n s += 2;\n while (isspace(*s)) s++;\n if (strstr(s, \" kB\")) {\n s[strstr(s, \" kB\")-s] = '\\0';\n }\n errno = 0;\n char *end;\n int64_t isysmem = strtoll(s, &end, 10);\n assert(errno == 0 && isysmem > 0);\n isysmem *= 1024;\n sysmem = isysmem;\n }\n fclose(f);\n }\n#else\n size_t memsize = 0;\n size_t len = sizeof(memsize);\n if (sysctlbyname(\"hw.memsize\", &memsize, &len, 0, 0) == 0) {\n sysmem = memsize;\n }\n#endif\n if (sysmem == 0) {\n fprintf(stderr, \"# could not detect total system memory, bailing\\n\");\n exit(1);\n }\n return sysmem;\n}\n\nuint64_t sys_seed(void) {\n #define NSEEDCAP 64\n static __thread int nseeds = 0;\n static __thread uint64_t seeds[NSEEDCAP];\n if (nseeds == 0) {\n // Generate a group of new seeds\n FILE *f = fopen(\"/dev/urandom\", \"rb+\");\n if (!f) {\n perror(\"# /dev/urandom\");\n exit(1);\n }\n size_t n = fread(seeds, 8, NSEEDCAP, f);\n (void)n;\n assert(n == NSEEDCAP);\n fclose(f);\n nseeds = NSEEDCAP;\n }\n return seeds[--nseeds];\n}\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// Return monotonic nanoseconds of the CPU clock.\nint64_t sys_now(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// Return unix timestamp in nanoseconds\nint64_t 
sys_unixnow(void) {\n struct timespec now = { 0 };\n clock_gettime(CLOCK_REALTIME, &now);\n return nanotime(&now);\n}\n\n#ifdef __APPLE__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n task_basic_info_data_t taskInfo;\n mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;\n kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,\n (task_info_t)&taskInfo, &infoCount);\n if (kr != KERN_SUCCESS) {\n fprintf(stderr, \"# task_info: %s\\n\", mach_error_string(kr));\n abort();\n }\n info->virt = taskInfo.virtual_size;\n info->rss = taskInfo.resident_size;\n}\n#elif __linux__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n FILE *f = fopen(\"/proc/self/statm\", \"r\");\n if (!f) {\n perror(\"# open /proc/self/statm\");\n abort();\n }\n unsigned long vm_pages, rss_pages;\n long x = fscanf(f, \"%lu %lu\", &vm_pages, &rss_pages);\n fclose(f);\n if (x != 2) {\n perror(\"# read /proc/self/statm\");\n abort();\n }\n\n // Get the system page size (in bytes)\n size_t page_size = sysconf(_SC_PAGESIZE);\n assert(page_size > 0);\n\n // Convert pages to bytes\n info->virt = vm_pages * page_size;\n info->rss = rss_pages * page_size;\n}\n#endif\n\n#include \n\nconst char *sys_arch(void) {\n static __thread bool got = false;\n static __thread char arch[1024] = \"unknown/error\";\n if (!got) {\n struct utsname unameData;\n if (uname(&unameData) == 0) {\n snprintf(arch, sizeof(arch), \"%s/%s\", unameData.sysname, \n unameData.machine);\n char *p = arch;\n while (*p) {\n *p = tolower(*p);\n p++;\n }\n got = true;\n }\n }\n return arch;\n}\n\nvoid sys_genuseid(char useid[16]) {\n const uint8_t chs[] = \n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789\";\n uint64_t a = sys_seed();\n uint64_t b = sys_seed();\n uint8_t bytes[16];\n memcpy(bytes, &a, 8);\n memcpy(bytes+8, &b, 8);\n for (int i = 0; i < 16; i++) {\n bytes[i] = chs[bytes[i]%62];\n }\n memcpy(useid, bytes, 16);\n}\n\n// Returns a unique thread id for the current thread.\n// This is 
an artificial generated value that is always distinct. \nuint64_t sys_threadid(void) {\n static atomic_int_fast64_t next = 0;\n static __thread uint64_t id = 0;\n if (id == 0) {\n id = atomic_fetch_add_explicit(&next, 1, __ATOMIC_RELEASE);\n }\n return id;\n}\n"], ["/pogocache/src/memcache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit memcache.c provides the parser for the Memcache wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\nstatic __thread size_t mc_n = 0;\n\nsize_t parse_lastmc_n(void) {\n return mc_n;\n}\n\nbool mc_valid_key(struct args *args, int i) {\n const uint8_t *key = (uint8_t*)args->bufs[i].data;\n size_t len = args->bufs[i].len;\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] == 0x7F) {\n return false;\n }\n }\n return true;\n}\n\nenum mc_cmd { MC_UNKNOWN, \n // writers (optional reply)\n MC_SET, MC_ADD, MC_REPLACE, MC_APPEND, MC_PREPEND, MC_CAS, // storage\n MC_INCR, MC_DECR, // increment/decrement\n MC_FLUSH_ALL, MC_DELETE, // deletion\n MC_TOUCH, // touch\n MC_VERBOSITY, // logging\n // readers (always replys)\n MC_GET, MC_GETS, // retreival\n MC_GAT, MC_GATS, // get and touch\n MC_VERSION, MC_STATS, // information\n MC_QUIT, // client\n};\n\nstatic bool is_mc_store_cmd(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_CAS;\n}\n\nstatic bool is_mc_noreplyable(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_VERBOSITY;\n}\n\nstatic ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const 
char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n // if (wn > 0) {\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}\n\nssize_t parse_memcache(const char *data, size_t len, struct args *args, \n bool *noreply)\n{\n ssize_t n = parse_memcache_telnet(data, len, args);\n if (n <= 0 || args->len == 0) {\n return n;\n }\n // args_print(args);\n mc_n = n;\n enum mc_cmd cmd;\n struct args args2 = { 0 };\n *noreply = false;\n // check for common get-2\n if (args->len == 2 && arg_const_eq(args, 0, \"get\")) {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args->bufs[0].data = \"mget\";\n args->bufs[0].len = 4;\n return n;\n }\n // Check for common set-5 (allows for expiry)\n if (args->len == 5 && arg_const_eq(args, 0, \"set\")) {\n if (args->bufs[2].len == 1 && args->bufs[2].data[0] == '0') {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool expset = false;\n int64_t x;\n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n expset = true;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n // replace the \"flags\" with a value\n args->bufs[2].len = value_len;\n 
args->bufs[2].data = (void*)value;\n args->len = 3;\n if (expset) {\n // add the \"ex \" to last two arguments\n args->bufs[4] = args->bufs[3];\n args->bufs[3].data = \"ex\";\n args->bufs[3].len = 2;\n args->len = 5;\n }\n return n;\n } else {\n // flags was set, use plus branch\n cmd = MC_SET;\n goto set_plus;\n }\n }\n // Otherwise use lookup command table. This could be optimized into a\n // switch table or hash table. See cmds.c for hash table example.\n cmd =\n arg_const_eq(args, 0, \"set\") ? MC_SET : // XY\n arg_const_eq(args, 0, \"add\") ? MC_ADD : // XY\n arg_const_eq(args, 0, \"cas\") ? MC_CAS : // XY\n arg_const_eq(args, 0, \"replace\") ? MC_REPLACE : // XY\n arg_const_eq(args, 0, \"get\") ? MC_GET : // XY\n arg_const_eq(args, 0, \"delete\") ? MC_DELETE : // XY\n arg_const_eq(args, 0, \"append\") ? MC_APPEND : // XY\n arg_const_eq(args, 0, \"prepend\") ? MC_PREPEND : // XY\n arg_const_eq(args, 0, \"gets\") ? MC_GETS : // XY\n arg_const_eq(args, 0, \"incr\") ? MC_INCR : // XY\n arg_const_eq(args, 0, \"decr\") ? MC_DECR: // XY\n arg_const_eq(args, 0, \"touch\") ? MC_TOUCH : // X\n arg_const_eq(args, 0, \"gat\") ? MC_GAT : // X\n arg_const_eq(args, 0, \"gats\") ? MC_GATS : // X\n arg_const_eq(args, 0, \"flush_all\") ? MC_FLUSH_ALL : // X\n arg_const_eq(args, 0, \"stats\") ? MC_STATS : // X\n arg_const_eq(args, 0, \"version\") ? MC_VERSION : // X\n arg_const_eq(args, 0, \"quit\") ? MC_QUIT : // XY\n arg_const_eq(args, 0, \"verbosity\") ? 
MC_VERBOSITY : // X\n MC_UNKNOWN;\n if (cmd == MC_UNKNOWN) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (is_mc_noreplyable(cmd)) {\n if (arg_const_eq(args, args->len-1, \"noreply\")) {\n *noreply = true;\n buf_clear(&args->bufs[args->len-1]);\n args->len--;\n }\n }\n if (is_mc_store_cmd(cmd)) {\n // Store commands include 'set', 'add', 'replace', 'append', 'prepend',\n // and 'cas'.\n if ((cmd == MC_CAS && args->len != 6) && \n (cmd != MC_CAS && args->len != 5))\n {\n parse_seterror(\"ERROR\");\n return -1;\n }\n set_plus:\n // check all values before continuing\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n int64_t x;\n if (!argi64(args, 2, &x) || x < 0) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool hascas = false;\n char cas[24] = \"0\";\n if (cmd == MC_CAS) {\n hascas = true;\n uint64_t y;\n if (!argu64(args, 5, &y)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n assert(args->bufs[5].len < sizeof(cas));\n memcpy(cas, args->bufs[5].data, args->bufs[5].len);\n cas[args->bufs[5].len] = '\\0';\n buf_clear(&args->bufs[5]);\n args->len--;\n }\n\n // Storage commands must read a value that follows the first line.\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n\n // Reconstruct the command into a RESP format. 
\n bool is_append_prepend = false;\n switch (cmd) {\n case MC_APPEND:\n args_append(&args2, \"append\", 6, true);\n is_append_prepend = true;\n break;\n case MC_PREPEND:\n args_append(&args2, \"prepend\", 7, true);\n is_append_prepend = true;\n break;\n default:\n args_append(&args2, \"set\", 3, true);\n break;\n }\n // Move key arg to new args\n take_and_append_arg(1);\n // Add value arg\n args_append(&args2, value, value_len, true);\n if (!is_append_prepend) {\n if (!(args->bufs[2].len == 1 && args->bufs[2].data[0] == '0')) {\n args_append(&args2, \"flags\", 5, true);\n take_and_append_arg(2);\n }\n \n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n args_append(&args2, \"ex\", 2, true);\n take_and_append_arg(3);\n }\n if (cmd == MC_ADD) {\n args_append(&args2, \"nx\", 2, true);\n } else if (cmd == MC_REPLACE) {\n args_append(&args2, \"xx\", 2, true);\n }\n if (hascas) {\n args_append(&args2, \"cas\", 3, true);\n args_append(&args2, cas, strlen(cas), false);\n }\n }\n } else if (cmd == MC_GET) {\n // Convert 'get * into 'MGET *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mget\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_DELETE) {\n // Convert 'delete ' into 'DEL '\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (args->len > 2) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"del\", 3, true);\n take_and_append_arg(1);\n } else if (cmd == MC_GETS) {\n // Convert 'gets * into 'MGETS *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; 
i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mgets\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GAT) {\n // Convert 'gat * into 'gat *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gat\", 3, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GATS) {\n // Convert 'gats * into 'gats *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gats\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_STATS) {\n args_append(&args2, \"stats\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_INCR) {\n // Convert 'incr into 'uincrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"uincrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_DECR) {\n // Convert 'decr into 'udecrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n 
return -1;\n }\n args_append(&args2, \"udecrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_TOUCH) {\n // Convert 'touch ' into 'expire '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"expire\", 6, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_FLUSH_ALL) {\n // Convert 'flush_all [delay]' into 'FLUSHALL [DELAY seconds]'\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"flushall\", 8, true);\n if (args->len == 2) {\n args_append(&args2, \"delay\", 5, true);\n take_and_append_arg(1);\n }\n } else if (cmd == MC_QUIT) {\n args_append(&args2, \"quit\", 4, true);\n *noreply = true;\n } else if (cmd == MC_VERSION) {\n args_append(&args2, \"version\", 7, true);\n *noreply = false;\n } else if (cmd == MC_VERBOSITY) {\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"verbosity\", 7, true);\n take_and_append_arg(1);\n } else {\n return -1;\n }\n args_free(args);\n *args = args2;\n return n;\n}\n"], ["/pogocache/src/resp.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit resp.c provides the parser for the RESP wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args) {\n char *err = NULL;\n struct buf arg = { 0 };\n bool inarg = false;\n char quote = '\\0';\n for (size_t i = 0; i < len; i++) {\n char ch = bytes[i];\n if (inarg) {\n if (quote) {\n if (ch == '\\n') {\n goto fail_quotes;\n }\n if (ch == quote) { \n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n inarg = false;\n if (ch == '\\n') {\n i--;\n continue;\n }\n if (!isspace(ch)) {\n goto fail_quotes;\n }\n continue;\n } else if (ch == '\\\\') {\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n switch (ch) {\n case 'n': ch = '\\n'; break;\n case 'r': ch = '\\r'; break;\n case 't': ch = '\\t'; break;\n }\n }\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n } else {\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else if (isspace(ch)) {\n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n if (ch == '\\n') {\n break;\n }\n inarg = false;\n } else {\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n } else {\n if (ch == '\\n') {\n 
buf_clear(&arg);\n return i+1;\n }\n if (isspace(ch)) {\n continue;\n }\n inarg = true;\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else {\n quote = 0;\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n }\n buf_clear(&arg);\n return 0;\nfail_quotes:\n if (!err) err = \"ERR Protocol error: unbalanced quotes in request\";\nfail_nargs:\n if (!err) err = \"ERR Protocol error: invalid multibulk length\";\nfail_argsz:\n if (!err) err = \"ERR Protocol error: invalid bulk length\";\n/* fail: */\n if (err) {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \"%s\", err);\n }\n buf_clear(&arg);\n return -1;\n}\n\nstatic int64_t read_num(const char *data, size_t len, int64_t min, int64_t max,\n bool *ok)\n{\n errno = 0;\n char *end;\n int64_t x = strtoll(data, &end, 10);\n *ok = errno == 0 && (size_t)(end-data) == len && x >= min && x <= max;\n return x;\n}\n\n#define read_resp_num(var, min, max, errmsg) { \\\n char *p = memchr(bytes, '\\r', end-bytes); \\\n if (!p) { \\\n if (end-bytes > 32) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n return 0; \\\n } \\\n if (p+1 == end) { \\\n return 0; \\\n } \\\n if (*(p+1) != '\\n') { \\\n return -1; \\\n } \\\n bool ok; \\\n var = read_num(bytes, p-bytes, min, max, &ok); \\\n if (!ok) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n bytes = p+2; \\\n}\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp(const char *bytes, size_t len, struct args *args) {\n const char *start = bytes;\n const char *end = bytes+len;\n if (bytes == end) {\n return 0;\n }\n if (*(bytes++) != '*') {\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nargs;\n read_resp_num(nargs, LONG_MIN, MAXARGS, \"invalid multibulk length\");\n for (int j = 0; j < nargs; j++) {\n if (bytes == end) {\n 
return 0;\n }\n if (*(bytes++) != '$') {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \n \"ERR Protocol error: expected '$', got '%c'\", *(bytes-1));\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nbytes;\n read_resp_num(nbytes, 0, MAXARGSZ, \"invalid bulk length\");\n if (nbytes+2 > end-bytes) {\n return 0;\n }\n args_append(args, bytes, nbytes, true);\n bytes += nbytes+2;\n }\n return bytes-start;\n}\n\n"], ["/pogocache/src/http.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit http.c provides the parser for the HTTP wire protocol.\n#define _GNU_SOURCE \n#include \n#include \n#include \n#include \n#include \"stats.h\"\n#include \"util.h\"\n#include \"parse.h\"\n\nextern const bool useauth;\nextern const char *auth;\n\nbool http_valid_key(const char *key, size_t len) {\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] >= 0x7F || key[i] == '%' || key[i] == '+' ||\n key[i] == '@' || key[i] == '$' || key[i] == '?' 
|| key[i] == '=') \n {\n return false;\n }\n }\n return true;\n}\n\nssize_t parse_http(const char *data, size_t len, struct args *args, \n int *httpvers, bool *keepalive)\n{\n *keepalive = false;\n *httpvers = 0;\n const char *method = 0;\n size_t methodlen = 0;\n const char *uri = 0;\n size_t urilen = 0;\n int proto = 0;\n const char *hdrname = 0; \n size_t hdrnamelen = 0;\n const char *hdrval = 0;\n size_t hdrvallen = 0;\n size_t bodylen = 0;\n bool nocontentlength = true;\n bool html = false;\n const char *authhdr = 0;\n size_t authhdrlen = 0;\n const char *p = data;\n const char *e = p+len;\n const char *s = p;\n while (p < e) {\n if (*p == ' ') {\n method = s;\n methodlen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == ' ') {\n uri = s;\n urilen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n if (p-s-1 != 8 || !bytes_const_eq(s, 5, \"HTTP/\") || \n s[5] < '0' || s[5] > '9' || s[6] != '.' 
|| \n s[7] < '0' || s[7] > '9')\n {\n goto badproto;\n }\n proto = (s[5]-'0')*10+(s[7]-'0');\n if (proto < 9 || proto >= 30) {\n goto badproto;\n }\n if (proto >= 11) {\n *keepalive = true;\n }\n *httpvers = proto;\n p++;\n goto readhdrs;\n }\n \n p++;\n }\n goto badreq;\nreadhdrs:\n // Parse the headers, pulling the pairs along the way.\n while (p < e) {\n hdrname = p;\n while (p < e) {\n if (*p == ':') {\n hdrnamelen = p-hdrname;\n p++;\n while (p < e && *p == ' ') {\n p++;\n }\n hdrval = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n hdrvallen = p-hdrval-1;\n // printf(\"[%.*s]=[%.*s]\\n\", (int)hdrnamelen, hdrname,\n // (int)hdrvallen, hdrval);\n // We have a new header pair (hdrname, hdrval);\n if (argeq_bytes(hdrname, hdrnamelen, \"content-length\")){\n uint64_t x;\n if (!parse_u64(hdrval, hdrvallen, &x) || \n x > MAXARGSZ)\n {\n stat_store_too_large_incr(0);\n goto badreq;\n }\n bodylen = x;\n nocontentlength = false;\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"connection\"))\n {\n *keepalive = argeq_bytes(hdrval, hdrvallen, \n \"keep-alive\");\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"accept\"))\n {\n if (memmem(hdrval, hdrvallen, \"text/html\", 9) != 0){\n html = true;\n }\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"authorization\"))\n {\n authhdr = hdrval;\n authhdrlen = hdrvallen;\n }\n p++;\n if (p < e && *p == '\\r') {\n p++;\n if (p < e && *p == '\\n') {\n p++;\n } else {\n goto badreq;\n }\n goto readbody;\n }\n break;\n }\n p++;\n }\n break;\n }\n p++;\n }\n }\n return 0;\nreadbody:\n // read the content body\n if ((size_t)(e-p) < bodylen) {\n return 0;\n }\n const char *body = p;\n p = e;\n\n // check\n if (urilen == 0 || uri[0] != '/') {\n goto badreq;\n }\n uri++;\n urilen--;\n const char *ex = 0;\n size_t exlen = 0;\n const char *flags = 0;\n size_t flagslen = 0;\n const char *cas = 0;\n size_t caslen = 0;\n const char *qauth = 0;\n size_t qauthlen = 0;\n bool xx = false;\n bool nx = 
false;\n // Parse the query string, pulling the pairs along the way.\n size_t querylen = 0;\n const char *query = memchr(uri, '?', urilen);\n if (query) {\n querylen = urilen-(query-uri);\n urilen = query-uri;\n query++;\n querylen--;\n const char *qkey;\n size_t qkeylen;\n const char *qval;\n size_t qvallen;\n size_t j = 0;\n size_t k = 0;\n for (size_t i = 0; i < querylen; i++) {\n if (query[i] == '=') {\n k = i;\n i++;\n for (; i < querylen; i++) {\n if (query[i] == '&') {\n break;\n }\n }\n qval = query+k+1;\n qvallen = i-k-1;\n qkeyonly:\n qkey = query+j;\n qkeylen = k-j;\n // We have a new query pair (qkey, qval);\n if (bytes_const_eq(qkey, qkeylen, \"flags\")) {\n flags = qval;\n flagslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"ex\") || \n bytes_const_eq(qkey, qkeylen, \"ttl\"))\n {\n ex = qval;\n exlen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"cas\")) {\n cas = qval;\n caslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"xx\")) {\n xx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"nx\")) {\n nx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"auth\")) {\n qauth = qval;\n qauthlen = qvallen;\n }\n j = i+1;\n } else if (query[i] == '&' || i == querylen-1) {\n qval = 0;\n qvallen = 0;\n if (i == querylen-1) {\n i++;\n }\n k = i;\n goto qkeyonly;\n }\n }\n }\n // The entire HTTP request is complete.\n // Turn request into valid command arguments.\n if (bytes_const_eq(method, methodlen, \"GET\")) {\n if (urilen > 0 && uri[0] == '@') {\n // system command such as @stats or @flushall\n goto badreq;\n } else if (urilen == 0) {\n goto showhelp;\n } else {\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"get\", 3, true);\n args_append(args, uri, urilen, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"PUT\")) {\n if (nocontentlength) {\n // goto badreq;\n }\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n 
args_append(args, \"set\", 3, true);\n args_append(args, uri, urilen, true);\n args_append(args, body, bodylen, true);\n if (cas) {\n args_append(args, \"cas\", 3, true);\n args_append(args, cas, caslen, true);\n }\n if (ex) {\n args_append(args, \"ex\", 2, true);\n args_append(args, ex, exlen, true);\n }\n if (flags) {\n args_append(args, \"flags\", 5, true);\n args_append(args, flags, flagslen, true);\n }\n if (xx) {\n args_append(args, \"xx\", 2, true);\n }\n if (nx) {\n args_append(args, \"nx\", 2, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"DELETE\")) {\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"del\", 3, true);\n args_append(args, uri, urilen, true);\n } else {\n parse_seterror(\"Method Not Allowed\");\n goto badreq;\n }\n\n // Check authorization\n const char *authval = 0;\n size_t authvallen = 0;\n if (qauthlen > 0) {\n authval = qauth;\n authvallen = qauthlen;\n } else if (authhdrlen > 0) {\n if (authhdrlen >= 7 && strncmp(authhdr, \"Bearer \", 7) == 0) {\n authval = authhdr + 7;\n authvallen = authhdrlen - 7;\n } else {\n goto unauthorized;\n }\n }\n if (useauth || authvallen > 0) {\n stat_auth_cmds_incr(0);\n size_t authlen = strlen(auth);\n if (authvallen != authlen || memcmp(auth, authval, authlen) != 0) {\n stat_auth_errors_incr(0);\n goto unauthorized;\n }\n\n }\n return e-data;\nbadreq:\n parse_seterror(\"Bad Request\");\n return -1;\nbadproto:\n parse_seterror(\"Bad Request\");\n return -1;\nbadkey:\n parse_seterror(\"Invalid Key\");\n return -1;\nunauthorized:\n parse_seterror(\"Unauthorized\");\n return -1;\nshowhelp:\n if (html) {\n parse_seterror(\"Show Help HTML\");\n } else {\n parse_seterror(\"Show Help TEXT\");\n }\n return -1;\n}\n"], ["/pogocache/src/parse.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit parse.c provides the entrypoint for parsing all data \n// for incoming client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n\n__thread char parse_lasterr[1024] = \"\";\n\nconst char *parse_lasterror(void) {\n return parse_lasterr;\n}\n\nssize_t parse_resp(const char *bytes, size_t len, struct args *args);\nssize_t parse_memcache(const char *data, size_t len, struct args *args,\n bool *noreply);\nssize_t parse_http(const char *data, size_t len, struct args *args,\n int *httpvers, bool *keepalive);\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args);\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pg);\n\nstatic bool sniff_proto(const char *data, size_t len, int *proto) {\n if (len > 0 && data[0] == '*') {\n *proto = PROTO_RESP;\n return true;\n }\n if (len > 0 && data[0] == '\\0') {\n *proto = PROTO_POSTGRES;\n return true;\n }\n // Parse the first line of text\n size_t n = 0;\n for (size_t i = 0; i < len; i++) {\n if (data[i] == '\\n') {\n n = i+1;\n break;\n }\n }\n // Look for \" HTTP/*.*\\r\\n\" suffix\n if (n >= 11 && memcmp(data+n-11, \" HTTP/\", 5) == 0 && \n data[n-4] == '.' 
&& data[n-2] == '\\r')\n {\n *proto = PROTO_HTTP;\n return true;\n }\n // Trim the prefix, Resp+Telnet and Memcache both allow for spaces between\n // arguments.\n while (*data == ' ') {\n data++;\n n--;\n len--;\n }\n // Treat all uppercase commands as Resp+Telnet\n if (n > 0 && data[0] >= 'A' && data[0] <= 'Z') {\n *proto = PROTO_RESP;\n return true;\n }\n // Look for Memcache commands\n if (n >= 1) {\n *proto = PROTO_MEMCACHE;\n return true;\n }\n // Protocol is unknown\n *proto = 0;\n return false;\n}\n\n// Returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\n// On success, the args and proto will be set to the command arguments and\n// protocol type, respectively.\n//\n// It's required to set proto to 0 for the first command, per client.\n// Then continue to provide the last known proto. \n// This allows for the parser to learn and predict the protocol for ambiguous\n// protocols; like Resp+Telnet, Memcache+Text, HTTP, etc.\n//\n// The noreply param is an output param that is only set when the proto is\n// memcache. The argument is stripped from the args array,\n// but made available to the caller in case it needs to be known.\n//\n// The keepalive param is an output param that is only set when the proto is\n// http. It's used to let the caller know to keep the connection alive for\n// another request.\nssize_t parse_command(const void *data, size_t len, struct args *args, \n int *proto, bool *noreply, int *httpvers, bool *keepalive, struct pg **pg)\n{\n args_clear(args);\n parse_lasterr[0] = '\\0';\n *httpvers = 0;\n *noreply = false;\n *keepalive = false;\n // Sniff for the protocol. 
This should only happen once per client, upon\n // their first request.\n if (*proto == 0) {\n if (!sniff_proto(data, len, proto)) {\n // Unknown protocol\n goto fail;\n }\n if (*proto == 0) {\n // Not enough data to determine yet\n return 0;\n }\n }\n if (*proto == PROTO_RESP) {\n const uint8_t *bytes = data;\n if (bytes[0] == '*') {\n return parse_resp(data, len, args);\n } else {\n return parse_resp_telnet(data, len, args);\n }\n } else if (*proto == PROTO_MEMCACHE) {\n return parse_memcache(data, len, args, noreply);\n } else if (*proto == PROTO_HTTP) {\n return parse_http(data, len, args, httpvers, keepalive);\n } else if (*proto == PROTO_POSTGRES) {\n return parse_postgres(data, len, args, pg);\n }\nfail:\n parse_seterror(\"ERROR\");\n return -1;\n}\n\n"], ["/pogocache/src/args.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit args.c provides functions for managing command arguments\n#include \n#include \n#include \n#include \"args.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n\nconst char *args_at(struct args *args, int idx, size_t *len) {\n *len = args->bufs[idx].len;\n return args->bufs[idx].data;\n}\n\nint args_count(struct args *args) {\n return args->len;\n}\n\nbool args_eq(struct args *args, int index, const char *str) {\n if ((size_t)index >= args->len) {\n return false;\n }\n size_t alen = args->bufs[index].len;\n const char *arg = args->bufs[index].data;\n size_t slen = strlen(str); \n if (alen != slen) {\n return false;\n }\n for (size_t i = 0; i < slen ; i++) {\n if (tolower(str[i]) != tolower(arg[i])) {\n return false;\n }\n }\n return true;\n}\n\nvoid args_append(struct args *args, const char *data, size_t len,\n 
bool zerocopy)\n{\n#ifdef NOZEROCOPY\n zerocopy = 0;\n#endif\n if (args->len == args->cap) {\n args->cap = args->cap == 0 ? 4 : args->cap*2;\n args->bufs = xrealloc(args->bufs, args->cap * sizeof(struct buf));\n memset(&args->bufs[args->len], 0, (args->cap-args->len) * \n sizeof(struct buf));\n }\n if (zerocopy) {\n buf_clear(&args->bufs[args->len]);\n args->bufs[args->len].len = len;\n args->bufs[args->len].data = (char*)data;\n } else {\n args->bufs[args->len].len = 0;\n buf_append(&args->bufs[args->len], data, len);\n }\n if (args->len == 0) {\n args->zerocopy = zerocopy;\n } else {\n args->zerocopy = args->zerocopy && zerocopy;\n }\n args->len++;\n}\n\nvoid args_clear(struct args *args) {\n if (!args->zerocopy) {\n for (size_t i = 0 ; i < args->len; i++) {\n buf_clear(&args->bufs[i]);\n }\n }\n args->len = 0;\n}\n\nvoid args_free(struct args *args) {\n args_clear(args);\n xfree(args->bufs);\n}\n\nvoid args_print(struct args *args) {\n printf(\". \");\n for (size_t i = 0; i < args->len; i++) {\n char *buf = args->bufs[i].data;\n int len = args->bufs[i].len;\n printf(\"[\"); \n binprint(buf, len);\n printf(\"] \");\n }\n printf(\"\\n\");\n}\n\n// remove the first item\nvoid args_remove_first(struct args *args) {\n if (args->len > 0) {\n buf_clear(&args->bufs[0]);\n for (size_t i = 1; i < args->len; i++) {\n args->bufs[i-1] = args->bufs[i];\n }\n args->len--;\n }\n}\n"], ["/pogocache/src/xmalloc.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit xmalloc.c is the primary allocator interface. 
The xmalloc/xfree\n// functions should be used instead of malloc/free.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#if defined(__linux__) && defined(__GLIBC__)\n#include \n#define HAS_MALLOC_H\n#endif\n\n// from main.c\nextern const int useallocator;\nextern const bool usetrackallocs;\n\n#ifdef NOTRACKALLOCS\n#define add_alloc()\n#define sub_alloc()\nsize_t xallocs(void) {\n return 0;\n}\n#else\nstatic atomic_int_fast64_t nallocs = 0;\n\nsize_t xallocs(void) {\n if (usetrackallocs) {\n return atomic_load(&nallocs);\n } else {\n return 0;\n }\n}\n\nstatic void add_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_add_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n\nstatic void sub_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_sub_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n#endif\n\nstatic void check_ptr(void *ptr) {\n if (!ptr) {\n fprintf(stderr, \"# %s\\n\", strerror(ENOMEM));\n abort();\n }\n}\n\nvoid *xmalloc(size_t size) {\n void *ptr = malloc(size);\n check_ptr(ptr);\n add_alloc();\n return ptr;\n}\n\nvoid *xrealloc(void *ptr, size_t size) {\n if (!ptr) {\n return xmalloc(size);\n }\n ptr = realloc(ptr, size);\n check_ptr(ptr);\n return ptr;\n}\n\nvoid xfree(void *ptr) {\n if (!ptr) {\n return;\n }\n free(ptr);\n sub_alloc();\n}\n\nvoid xpurge(void) {\n#ifdef HAS_MALLOC_H\n // Releases unused heap memory to OS\n malloc_trim(0);\n#endif\n}\n"], ["/pogocache/src/buf.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit buf.c is a simple interface for creating byte buffers\n#include \n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"buf.h\"\n\nvoid buf_ensure(struct buf *buf, size_t len) {\n if (buf->len+len > buf->cap) {\n size_t oldcap = buf->cap;\n size_t newcap = buf->cap;\n if (oldcap == 0) {\n buf->data = 0;\n newcap = 16;\n } else {\n newcap *= 2;\n }\n while (buf->len+len > newcap) {\n newcap *= 2;\n }\n buf->data = xrealloc(buf->data, newcap);\n buf->cap = newcap;\n }\n}\n\nvoid buf_append(struct buf *buf, const void *data, size_t len){\n buf_ensure(buf, len);\n memcpy(buf->data+buf->len, data, len);\n buf->len += len;\n}\n\nvoid buf_append_byte(struct buf *buf, char byte) {\n if (buf->len < buf->cap) {\n buf->data[buf->len++] = byte;\n } else {\n buf_append(buf, &byte, 1);\n }\n}\n\nvoid buf_clear(struct buf *buf) {\n // No capacity means this buffer is owned somewhere else and we \n // must not free the data.\n if (buf->cap) {\n xfree(buf->data);\n }\n memset(buf, 0, sizeof(struct buf));\n}\n\nvoid buf_append_uvarint(struct buf *buf, uint64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_u64(buf->data+buf->len, x);\n buf->len += n;\n}\n\nvoid buf_append_varint(struct buf *buf, int64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_i64(buf->data+buf->len, x);\n buf->len += n;\n}\n"], ["/pogocache/src/stats.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit stats.c tracks various stats. Mostly for the memcache protocol.\n#include \n#include \"stats.h\"\n\nstatic atomic_uint_fast64_t g_stat_cmd_flush = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_touch = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_meta = 0;\nstatic atomic_uint_fast64_t g_stat_get_expired = 0;\nstatic atomic_uint_fast64_t g_stat_get_flushed = 0;\nstatic atomic_uint_fast64_t g_stat_delete_misses = 0;\nstatic atomic_uint_fast64_t g_stat_delete_hits = 0;\nstatic atomic_uint_fast64_t g_stat_incr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_incr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_decr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_decr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_misses = 0;\nstatic atomic_uint_fast64_t g_stat_cas_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_badval = 0;\nstatic atomic_uint_fast64_t g_stat_touch_hits = 0;\nstatic atomic_uint_fast64_t g_stat_touch_misses = 0;\nstatic atomic_uint_fast64_t g_stat_store_too_large = 0;\nstatic atomic_uint_fast64_t g_stat_store_no_memory = 0;\nstatic atomic_uint_fast64_t g_stat_auth_cmds = 0;\nstatic atomic_uint_fast64_t g_stat_auth_errors = 0;\n\nvoid stat_cmd_flush_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_flush, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_touch_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_touch, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_meta_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_meta, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_expired_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_expired, 1, __ATOMIC_RELAXED);\n}\n\nvoid 
stat_get_flushed_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_flushed, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_badval_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_badval, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_too_large_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_too_large, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_no_memory_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_no_memory, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_cmds_incr(struct conn *conn) {\n 
(void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_cmds, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_errors_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_errors, 1, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_flush(void) {\n return atomic_load_explicit(&g_stat_cmd_flush, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_touch(void) {\n return atomic_load_explicit(&g_stat_cmd_touch, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_meta(void) {\n return atomic_load_explicit(&g_stat_cmd_meta, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_expired(void) {\n return atomic_load_explicit(&g_stat_get_expired, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_flushed(void) {\n return atomic_load_explicit(&g_stat_get_flushed, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_misses(void) {\n return atomic_load_explicit(&g_stat_delete_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_hits(void) {\n return atomic_load_explicit(&g_stat_delete_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_misses(void) {\n return atomic_load_explicit(&g_stat_incr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_hits(void) {\n return atomic_load_explicit(&g_stat_incr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_misses(void) {\n return atomic_load_explicit(&g_stat_decr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_hits(void) {\n return atomic_load_explicit(&g_stat_decr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_misses(void) {\n return atomic_load_explicit(&g_stat_cas_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_hits(void) {\n return atomic_load_explicit(&g_stat_cas_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_badval(void) {\n return atomic_load_explicit(&g_stat_cas_badval, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_hits(void) {\n return atomic_load_explicit(&g_stat_touch_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_misses(void) {\n return atomic_load_explicit(&g_stat_touch_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_too_large(void) {\n 
return atomic_load_explicit(&g_stat_store_too_large, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_no_memory(void) {\n return atomic_load_explicit(&g_stat_store_no_memory, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_cmds(void) {\n return atomic_load_explicit(&g_stat_auth_cmds, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_errors(void) {\n return atomic_load_explicit(&g_stat_auth_errors, __ATOMIC_RELAXED);\n}\n\n\n"], ["/pogocache/src/uring.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n#include \n#include \"uring.h\"\n\nbool uring_available(void) {\n#ifdef NOURING\n return false;\n#else\n static atomic_int available = -1;\n if (atomic_load_explicit(&available, __ATOMIC_ACQUIRE )== -1) {\n struct io_uring ring;\n if (io_uring_queue_init(1, &ring, 0) == 0) {\n io_uring_queue_exit(&ring);\n atomic_store(&available, 1);\n } else {\n atomic_store(&available, 0);\n }\n }\n return atomic_load_explicit(&available, __ATOMIC_ACQUIRE) == 1;\n#endif\n}\n"]], "task_instance_info": {"created_time": "2025-08-20 18:04:05", "created_task_model": "DeepSeek-R1", "function_skeleton": "bool tls_accept(int fd, struct tls **tls_out) {}", "code_description": "This code segment functions to: Establish a secure connection by performing a TLS handshake negotiation and creating a session context for encrypted communication channels."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```c\n[TASK_BEGIN]\nbool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = NULL;\n return true;\n }\n struct tls *tls = xmalloc(sizeof(*tls));\n tls->ssl = SSL_new(ctx);\n SSL_set_fd(tls->ssl, fd);\n if (SSL_accept(tls->ssl) <= 0) {\n 
SSL_free(tls->ssl);\n xfree(tls);\n *tls_out = NULL;\n return false;\n }\n *tls_out = tls;\n return true;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 18-04-33"}, "editdistance_info": {"edit_distance": 33.913, "calculate_time": "2025-08-20 18:04:33", "true_code_clean": "bool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}", "predict_code_clean": "bool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = NULL;\n return true;\n }\n struct tls *tls = xmalloc(sizeof(*tls));\n tls->ssl = SSL_new(ctx);\n SSL_set_fd(tls->ssl, fd);\n if (SSL_accept(tls->ssl) <= 0) {\n SSL_free(tls->ssl);\n xfree(tls);\n *tls_out = NULL;\n return false;\n }\n *tls_out = tls;\n return true;\n}"}} {"repo_name": "pogocache", "file_name": "/pogocache/src/tls.c", "inference_info": {"prefix_code": "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit tls.c provides an interface for translating TLS bytes streams.\n// This is intended to be used with client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tls.h\"\n#include \"xmalloc.h\"\n#include \"openssl.h\"\n\n#ifdef NOOPENSSL\n\nvoid tls_init(void) {}\nbool tls_accept(int fd, struct tls **tls_out) {\n (void)fd;\n *tls_out = 0;\n return true;\n}\nint tls_close(struct tls *tls, int fd) {\n (void)tls;\n return close(fd);\n}\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n (void)tls;\n return read(fd, data, len);\n}\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n (void)tls;\n return write(fd, data, len);\n}\n#else\n\nextern const bool usetls;\nextern const char *tlscertfile;\nextern const char *tlscacertfile;\nextern const char *tlskeyfile;\n\nstatic SSL_CTX *ctx;\n\nstruct tls {\n SSL *ssl;\n};\n\nvoid tls_init(void) {\n if (!usetls) {\n return;\n }\n ctx = SSL_CTX_new(TLS_server_method());\n if (!SSL_CTX_load_verify_locations(ctx, tlscacertfile, 0)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(1);\n }\n if (!SSL_CTX_use_certificate_file(ctx, tlscertfile , SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_use_PrivateKey_file(ctx, tlskeyfile, SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_check_private_key(ctx)) {\n printf(\"# tls: private key does not match the certificate\\n\");\n 
exit(EXIT_FAILURE);\n }\n}\n\n", "suffix_code": "\n\nint tls_close(struct tls *tls, int fd) {\n if (tls) {\n if (SSL_shutdown(tls->ssl) == 0) {\n SSL_shutdown(tls->ssl);\n }\n SSL_free(tls->ssl);\n xfree(tls);\n }\n return close(fd);\n}\n\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n if (!tls) {\n return write(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_write_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else {\n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n if (!tls) {\n return read(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_read_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else { \n // Unreliable errno. 
Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\n#endif\n", "middle_code": "bool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "c", "sub_task_type": null}, "context_code": [["/pogocache/src/net.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit net.c provides most network functionality, including listening on ports,\n// thread creation, event queue handling, and reading & writing sockets.\n#define _GNU_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifdef __linux__\n#include \n#include \n#include \n#include \n#else\n#include \n#endif\n\n#include \"uring.h\"\n#include \"stats.h\"\n#include \"net.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"xmalloc.h\"\n\n#define PACKETSIZE 16384\n#define MINURINGEVENTS 2 // there must be at least 2 events for uring use\n\nextern const int verb;\n\nstatic int setnonblock(int fd) {\n int flags = fcntl(fd, F_GETFL, 0);\n if (flags == -1) {\n return -1;\n }\n return fcntl(fd, F_SETFL, flags | O_NONBLOCK);\n}\n\nstatic int settcpnodelay(int fd, bool nodelay) {\n int val = nodelay;\n return setsockopt(fd, SOL_SOCKET, TCP_NODELAY, &val, sizeof(val)) == 0;\n}\n\nstatic int setquickack(int fd, bool quickack) {\n#if defined(__linux__)\n int val = quickack;\n return setsockopt(fd, SOL_SOCKET, TCP_QUICKACK, &val, sizeof(val)) == 0;\n#else\n (void)fd, (void)quickack;\n return 0;\n#endif\n}\n\nstatic int setkeepalive(int fd, bool keepalive) {\n int val = keepalive;\n if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val))) {\n return -1;\n }\n#if defined(__linux__)\n if (!keepalive) {\n return 0;\n }\n // tcp_keepalive_time\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &(int){300}, sizeof(int))) \n {\n return -1;\n }\n // tcp_keepalive_intvl\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&(int){30}, sizeof(int)))\n {\n return -1;\n }\n // tcp_keepalive_probes\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &(int){3}, sizeof(int))) {\n return -1;\n }\n#endif\n return 0;\n}\n\n#ifdef __linux__\ntypedef struct epoll_event event_t;\n#else\ntypedef struct kevent event_t;\n#endif\n\nstatic int event_fd(event_t *ev) {\n#ifdef __linux__\n return ev->data.fd;\n#else\n return ev->ident;\n#endif\n}\n\nstatic int getevents(int fd, event_t evs[], int nevs, bool wait_forever, \n int64_t timeout)\n{\n if (wait_forever) {\n#ifdef __linux__\n return epoll_wait(fd, evs, nevs, -1);\n#else\n return kevent(fd, NULL, 0, evs, nevs, 0);\n#endif\n } else {\n timeout = timeout < 0 ? 0 : \n timeout > 900000000 ? 900000000 : // 900ms\n timeout;\n#ifdef __linux__\n timeout = timeout / 1000000;\n return epoll_wait(fd, evs, nevs, timeout);\n#else\n struct timespec timespec = { .tv_nsec = timeout };\n return kevent(fd, NULL, 0, evs, nevs, ×pec);\n#endif\n }\n}\n\nstatic int addread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN | EPOLLEXCLUSIVE;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int addwrite(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delwrite(int qfd, int fd) {\n#ifdef __linux__\n struct 
epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int evqueue(void) {\n#ifdef __linux__\n return epoll_create1(0);\n#else\n return kqueue();\n#endif\n}\n\nstruct bgworkctx { \n void (*work)(void *udata);\n void (*done)(struct net_conn *conn, void *udata);\n struct net_conn *conn;\n void *udata;\n bool writer;\n};\n\n// static void bgdone(struct bgworkctx *bgctx);\n\nstruct net_conn {\n int fd;\n struct net_conn *next; // for hashmap bucket\n bool closed;\n struct tls *tls;\n void *udata;\n char *out;\n size_t outlen;\n size_t outcap;\n struct bgworkctx *bgctx;\n struct qthreadctx *ctx;\n unsigned stat_cmd_get;\n unsigned stat_cmd_set;\n unsigned stat_get_hits;\n unsigned stat_get_misses;\n};\n\nstatic struct net_conn *conn_new(int fd, struct qthreadctx *ctx) {\n struct net_conn *conn = xmalloc(sizeof(struct net_conn));\n memset(conn, 0, sizeof(struct net_conn));\n conn->fd = fd;\n conn->ctx = ctx;\n return conn;\n}\n\nstatic void conn_free(struct net_conn *conn) {\n if (conn) {\n if (conn->out) {\n xfree(conn->out);\n }\n xfree(conn);\n }\n}\n\nvoid net_conn_out_ensure(struct net_conn *conn, size_t amount) {\n if (conn->outcap-conn->outlen >= amount) {\n return;\n }\n size_t cap = conn->outcap == 0 ? 
16 : conn->outcap * 2;\n while (cap-conn->outlen < amount) {\n cap *= 2;\n }\n char *out = xmalloc(cap);\n memcpy(out, conn->out, conn->outlen);\n xfree(conn->out);\n conn->out = out;\n conn->outcap = cap;\n}\n\nvoid net_conn_out_write_byte_nocheck(struct net_conn *conn, char byte) {\n conn->out[conn->outlen++] = byte;\n}\n\nvoid net_conn_out_write_byte(struct net_conn *conn, char byte) {\n if (conn->outcap == conn->outlen) {\n net_conn_out_ensure(conn, 1);\n }\n net_conn_out_write_byte_nocheck(conn, byte);\n}\n\nvoid net_conn_out_write_nocheck(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n memcpy(conn->out+conn->outlen, data, nbytes);\n conn->outlen += nbytes;\n}\n\nvoid net_conn_out_write(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n if (conn->outcap-conn->outlen < nbytes) {\n net_conn_out_ensure(conn, nbytes);\n }\n net_conn_out_write_nocheck(conn, data, nbytes);\n}\n\nchar *net_conn_out(struct net_conn *conn) {\n return conn->out;\n}\n\nsize_t net_conn_out_len(struct net_conn *conn) {\n return conn->outlen;\n}\n\nsize_t net_conn_out_cap(struct net_conn *conn) {\n return conn->outcap;\n}\n\nvoid net_conn_out_setlen(struct net_conn *conn, size_t len) {\n assert(len < conn->outcap);\n conn->outlen = len;\n}\n\n\nbool net_conn_isclosed(struct net_conn *conn) {\n return conn->closed;\n}\n\nvoid net_conn_close(struct net_conn *conn) {\n conn->closed = true;\n}\n\nvoid net_conn_setudata(struct net_conn *conn, void *udata) {\n conn->udata = udata;\n}\n\nvoid *net_conn_udata(struct net_conn *conn) {\n return conn->udata;\n}\n\nstatic uint64_t hashfd(int fd) {\n return mix13((uint64_t)fd);\n}\n\n// map of connections\nstruct cmap {\n struct net_conn **buckets;\n size_t nbuckets;\n size_t len;\n};\n\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn);\n\nstatic void cmap_grow(struct cmap *cmap) {\n struct cmap cmap2 = { 0 };\n cmap2.nbuckets = cmap->nbuckets*2;\n size_t size = cmap2.nbuckets * sizeof(struct net_conn*);\n 
cmap2.buckets = xmalloc(size);\n memset(cmap2.buckets, 0, cmap2.nbuckets*sizeof(struct net_conn*));\n for (size_t i = 0; i < cmap->nbuckets; i++) {\n struct net_conn *conn = cmap->buckets[i];\n while (conn) {\n struct net_conn *next = conn->next;\n conn->next = 0;\n cmap_insert(&cmap2, conn);\n conn = next;\n }\n }\n xfree(cmap->buckets);\n memcpy(cmap, &cmap2, sizeof(struct cmap));\n}\n\n// Insert a connection into a map. \n// The connection MUST NOT exist in the map.\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n if (cmap->len >= cmap->nbuckets-(cmap->nbuckets>>2)) { // 75% load factor\n // if (cmap->len >= cmap->nbuckets) { // 100% load factor\n cmap_grow(cmap);\n }\n size_t i = hash % cmap->nbuckets;\n conn->next = cmap->buckets[i];\n cmap->buckets[i] = conn;\n cmap->len++;\n}\n\n// Return the connection or NULL if not exists.\nstatic struct net_conn *cmap_get(struct cmap *cmap, int fd) {\n uint32_t hash = hashfd(fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *conn = cmap->buckets[i];\n while (conn && conn->fd != fd) {\n conn = conn->next;\n }\n return conn;\n}\n\n// Delete connection from map. 
\n// The connection MUST exist in the map.\nstatic void cmap_delete(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *prev = 0;\n struct net_conn *iter = cmap->buckets[i];\n while (iter != conn) {\n prev = iter;\n iter = iter->next;\n }\n if (prev) {\n prev->next = iter->next;\n } else {\n cmap->buckets[i] = iter->next;\n }\n}\n\nstatic atomic_size_t nconns = 0;\nstatic atomic_size_t tconns = 0;\nstatic atomic_size_t rconns = 0;\n\nstatic pthread_mutex_t tls_ready_fds_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic int tls_ready_fds_cap = 0;\nstatic int tls_ready_fds_len = 0;\nstatic int *tls_ready_fds = 0;\n\nstatic void save_tls_fd(int fd) {\n pthread_mutex_lock(&tls_ready_fds_lock);\n if (tls_ready_fds_len == tls_ready_fds_cap) {\n tls_ready_fds_cap *= 2;\n if (tls_ready_fds_cap == 0) {\n tls_ready_fds_cap = 8;\n }\n tls_ready_fds = xrealloc(tls_ready_fds, tls_ready_fds_cap*sizeof(int));\n }\n tls_ready_fds[tls_ready_fds_len++] = fd;\n pthread_mutex_unlock(&tls_ready_fds_lock);\n}\n\nstatic bool del_tls_fd(int fd) {\n bool found = false;\n pthread_mutex_lock(&tls_ready_fds_lock);\n for (int i = 0; i < tls_ready_fds_len; i++) {\n if (tls_ready_fds[i] == fd) {\n tls_ready_fds[i] = tls_ready_fds[tls_ready_fds_len-1];\n tls_ready_fds_len--;\n found = true;\n break;\n }\n }\n pthread_mutex_unlock(&tls_ready_fds_lock);\n return found;\n}\n\nstruct qthreadctx {\n pthread_t th;\n int qfd;\n int index;\n int maxconns;\n int *sfd; // three entries\n bool tcpnodelay;\n bool keepalive;\n bool quickack;\n int queuesize;\n const char *unixsock;\n void *udata;\n bool uring;\n#ifndef NOURING\n struct io_uring ring;\n#endif\n void(*data)(struct net_conn*,const void*,size_t,void*);\n void(*opened)(struct net_conn*,void*);\n void(*closed)(struct net_conn*,void*);\n int nevents;\n event_t *events;\n atomic_int nconns;\n int ntlsconns;\n char *inpkts;\n struct net_conn **qreads;\n struct net_conn 
**qins;\n struct net_conn **qattachs;\n struct net_conn **qouts;\n struct net_conn **qcloses;\n char **qinpkts;\n int *qinpktlens; \n int nqreads;\n int nqins;\n int nqcloses;\n int nqattachs;\n int nqouts;\n int nthreads;\n \n uint64_t stat_cmd_get;\n uint64_t stat_cmd_set;\n uint64_t stat_get_hits;\n uint64_t stat_get_misses;\n\n struct qthreadctx *ctxs;\n struct cmap cmap;\n};\n\nstatic atomic_uint_fast64_t g_stat_cmd_get = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_set = 0;\nstatic atomic_uint_fast64_t g_stat_get_hits = 0;\nstatic atomic_uint_fast64_t g_stat_get_misses = 0;\n\ninline\nstatic void sumstats(struct net_conn *conn, struct qthreadctx *ctx) {\n ctx->stat_cmd_get += conn->stat_cmd_get;\n conn->stat_cmd_get = 0;\n ctx->stat_cmd_set += conn->stat_cmd_set;\n conn->stat_cmd_set = 0;\n ctx->stat_get_hits += conn->stat_get_hits;\n conn->stat_get_hits = 0;\n ctx->stat_get_misses += conn->stat_get_misses;\n conn->stat_get_misses = 0;\n}\n\ninline\nstatic void sumstats_global(struct qthreadctx *ctx) {\n atomic_fetch_add_explicit(&g_stat_cmd_get, ctx->stat_cmd_get, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_get = 0;\n atomic_fetch_add_explicit(&g_stat_cmd_set, ctx->stat_cmd_set, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_set = 0;\n atomic_fetch_add_explicit(&g_stat_get_hits, ctx->stat_get_hits, \n __ATOMIC_RELAXED);\n ctx->stat_get_hits = 0;\n atomic_fetch_add_explicit(&g_stat_get_misses, ctx->stat_get_misses, \n __ATOMIC_RELAXED);\n ctx->stat_get_misses = 0;\n}\n\nuint64_t stat_cmd_get(void) {\n uint64_t x = atomic_load_explicit(&g_stat_cmd_get, __ATOMIC_RELAXED);\n return x;\n}\n\nuint64_t stat_cmd_set(void) {\n return atomic_load_explicit(&g_stat_cmd_set, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_hits(void) {\n return atomic_load_explicit(&g_stat_get_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_misses(void) {\n return atomic_load_explicit(&g_stat_get_misses, __ATOMIC_RELAXED);\n}\n\ninline\nstatic void qreset(struct qthreadctx *ctx) {\n ctx->nqreads = 0;\n 
    ctx->nqins = 0;
    ctx->nqcloses = 0;
    ctx->nqouts = 0;
    ctx->nqattachs = 0;
}

// qaccept scans the events returned by the poller. Listener fds are drained
// with accept() and the new sockets are handed round-robin to the qthreads;
// established connections are classified into the per-iteration work queues
// (attach/out/close/read) consumed by the later pipeline steps.
inline
static void qaccept(struct qthreadctx *ctx) {
    for (int i = 0; i < ctx->nevents; i++) {
        int fd = event_fd(&ctx->events[i]);
        struct net_conn *conn = cmap_get(&ctx->cmap, fd);
        if (!conn) {
            if ((fd == ctx->sfd[0] || fd == ctx->sfd[1] || fd == ctx->sfd[2])) {
                // Event is on one of the listeners. sfd[0] and sfd[2] get
                // TCP socket options below; sfd[1] presumably is the unix
                // listener — TODO confirm against net_main's listen order.
                int sfd = fd;
                fd = accept(fd, 0, 0);
                if (fd == -1) {
                    continue;
                }
                if (setnonblock(fd) == -1) {
                    close(fd);
                    continue;
                }
                if (sfd == ctx->sfd[0] || sfd == ctx->sfd[2]) {
                    // TCP-only socket options.
                    if (setkeepalive(fd, ctx->keepalive) == -1) {
                        close(fd);
                        continue;
                    }
                    if (settcpnodelay(fd, ctx->tcpnodelay) == -1) {
                        close(fd);
                        continue;
                    }
                    if (setquickack(fd, ctx->quickack) == -1) {
                        close(fd);
                        continue;
                    }
                    if (sfd == ctx->sfd[2]) {
                        // Remember that this fd must perform a TLS accept
                        // when its first event fires on the owning qthread.
                        save_tls_fd(fd);
                    }
                }
                // Distribute accepted sockets across all qthreads.
                static atomic_uint_fast64_t next_ctx_index = 0;
                int idx = atomic_fetch_add(&next_ctx_index, 1) % ctx->nthreads;
                if (addread(ctx->ctxs[idx].qfd, fd) == -1) {
                    if (sfd == ctx->sfd[2]) {
                        del_tls_fd(fd);
                    }
                    close(fd);
                    continue;
                }
                continue;
            }
            // First event for a newly accepted socket on this qthread:
            // enforce maxconns, then register the connection.
            size_t xnconns = atomic_fetch_add(&nconns, 1);
            if (xnconns >= (size_t)ctx->maxconns) {
                // rejected
                atomic_fetch_add(&rconns, 1);
                atomic_fetch_sub(&nconns, 1);
                close(fd);
                continue;
            }
            bool istls = del_tls_fd(fd);
            conn = conn_new(fd, ctx);
            if (istls) {
                if (!tls_accept(conn->fd, &conn->tls)) {
                    atomic_fetch_sub(&nconns, 1);
                    close(fd);
                    conn_free(conn);
                    continue;
                }
                ctx->ntlsconns++;
            }
            atomic_fetch_add_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);
            atomic_fetch_add_explicit(&tconns, 1, __ATOMIC_RELEASE);
            cmap_insert(&ctx->cmap, conn);
            ctx->opened(conn, ctx->udata);
        }
        if (conn->bgctx) {
            // BGWORK(2)
            // The connection has been added back to the event loop, but it
            // needs to be attached and restated.
            ctx->qattachs[ctx->nqattachs++] = conn;
        } else if (conn->outlen > 0) {
            ctx->qouts[ctx->nqouts++] = conn;
        } else if (conn->closed) {
            ctx->qcloses[ctx->nqcloses++] = conn;
        } else {
            ctx->qreads[ctx->nqreads++] = conn;
        }
    }
}

// handle_read classifies the result of one socket read. EOF (n==0) or a
// real error queues the connection for close; EAGAIN is converted to an
// empty packet so the data callback still runs. On success the packet is
// NUL-terminated and queued for qprocess.
inline
static void handle_read(ssize_t n, char *pkt, struct net_conn *conn,
    struct qthreadctx *ctx)
{
    assert(conn->outlen == 0);
    assert(conn->bgctx == 0);
    if (n <= 0) {
        if (n == 0 || errno != EAGAIN) {
            // read failed, close connection
            ctx->qcloses[ctx->nqcloses++] = conn;
            return;
        }
        assert(n == -1 && errno == EAGAIN);
        // even though there's an EAGAIN, still call the user data event
        // handler with an empty packet 
        n = 0;
    }
    // Reads are capped at PACKETSIZE-1 bytes, so this terminator fits.
    pkt[n] = '\0';
    ctx->qins[ctx->nqins] = conn;
    ctx->qinpkts[ctx->nqins] = pkt;
    ctx->qinpktlens[ctx->nqins] = n;
    ctx->nqins++;
}

// flush_conn writes conn->out[written..outlen) to the socket, retrying on
// EAGAIN, and marks the connection closed on any other write error. The
// output buffer is always considered drained afterwards.
inline 
static void flush_conn(struct net_conn *conn, size_t written) {
    while (written < conn->outlen) {
        ssize_t n;
        if (conn->tls) {
            n = tls_write(conn->tls, conn->fd, conn->out+written, 
                conn->outlen-written);
        } else {
            n = write(conn->fd, conn->out+written, conn->outlen-written);
        }
        if (n == -1) {
            if (errno == EAGAIN) {
                // NOTE(review): this busy-waits until the socket becomes
                // writable — presumably acceptable for small responses;
                // confirm against expected output sizes.
                continue;
            }
            conn->closed = true;
            break;
        }
        written += n;
    }
    // either everything was written or the socket is closed
    conn->outlen = 0;
}

// qattach finalizes connections whose background work completed: runs the
// 'done' callback, frees the bg context, moves the fd from write- back to
// read-polling, flushes any response, and re-queues the connection.
inline
static void qattach(struct qthreadctx *ctx) {
    for (int i = 0; i < ctx->nqattachs; i++) {
        // BGWORK(3)
        // A bgworker has finished, make sure it's added back into the 
        // event loop in the correct state.
        struct net_conn *conn = ctx->qattachs[i];
        struct bgworkctx *bgctx = conn->bgctx;
        bgctx->done(conn, bgctx->udata);
        conn->bgctx = 0;
        // NOTE(review): this assert runs after bgctx has already been
        // dereferenced above; it can only document intent, not catch NULL.
        assert(bgctx);
        xfree(bgctx);
        int ret = delwrite(conn->ctx->qfd, conn->fd);
        assert(ret == 0); (void)ret;
        ret = addread(conn->ctx->qfd, conn->fd);
        assert(ret == 0); (void)ret;
        flush_conn(conn, 0);
        if (conn->closed) {
            ctx->qcloses[ctx->nqcloses++] = conn;
        } else {
            ctx->qreads[ctx->nqreads++] = conn;
        }
    }
}

inline
static void qread(struct qthreadctx *ctx) {
    // Read incoming socket data
#ifndef NOURING
    if 
(ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {
        // read incoming using uring
        // Queue one read sqe per connection and submit the whole batch
        // with a single syscall. This path is only taken for batches of at
        // least MINURINGEVENTS and only when the thread has no TLS conns.
        for (int i = 0; i < ctx->nqreads; i++) {
            struct net_conn *conn = ctx->qreads[i];
            char *pkt = ctx->inpkts+(i*PACKETSIZE);
            struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);
            io_uring_prep_read(sqe, conn->fd, pkt, PACKETSIZE-1, 0);
        }
        int ret = io_uring_submit(&ctx->ring);
        if (ret < 0) {
            errno = -ret;
            perror("# io_uring_submit");
            abort();
        }
        assert(ret == ctx->nqreads);
        for (int i = 0; i < ctx->nqreads; i++) {
            struct io_uring_cqe *cqe;
            if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {
                perror("# io_uring_wait_cqe");
                abort();
            }
            // NOTE(review): the cqe is paired with qreads[i] purely by
            // arrival order and no user_data tag is set; io_uring does not
            // in general guarantee completions in submission order —
            // confirm this pairing is safe for these read ops.
            struct net_conn *conn = ctx->qreads[i];
            char *pkt = ctx->inpkts+(i*PACKETSIZE);
            ssize_t n = cqe->res;
            if (n < 0) {
                // cqe->res carries a negative errno on failure; convert to
                // the -1/errno convention expected by handle_read.
                errno = -n;
                n = -1;
            }
            handle_read(n, pkt, conn, ctx);
            io_uring_cqe_seen(&ctx->ring, cqe);
        }
    } else {
#endif
        // read incoming data using standard syscalls.
        for (int i = 0; i < ctx->nqreads; i++) {
            struct net_conn *conn = ctx->qreads[i];
            char *pkt = ctx->inpkts+(i*PACKETSIZE);
            ssize_t n;
            if (conn->tls) {
                n = tls_read(conn->tls, conn->fd, pkt, PACKETSIZE-1);
            } else {
                n = read(conn->fd, pkt, PACKETSIZE-1);
            }
            handle_read(n, pkt, conn, ctx);
        }
#ifndef NOURING
    }
#endif
}


// qprocess hands each queued input packet to the user data callback, folds
// the connection's stat counters into the thread context, then classifies
// the connection for the write/close steps.
inline
static void qprocess(struct qthreadctx *ctx) {
    // process all new incoming data
    for (int i = 0; i < ctx->nqins; i++) {
        struct net_conn *conn = ctx->qins[i];
        char *p = ctx->qinpkts[i];
        int n = ctx->qinpktlens[i];
        ctx->data(conn, p, n, ctx->udata);
        sumstats(conn, ctx);
        if (conn->bgctx) {
            // BGWORK(1)
            // Connection entered background mode.
            // This means the connection is no longer in the event queue but
            // is still owned by this qthread. 
Once the bgwork is done the 
            // connection will be added back to the queue with addwrite.
        } else if (conn->outlen > 0) {
            ctx->qouts[ctx->nqouts++] = conn;
        } else if (conn->closed) {
            ctx->qcloses[ctx->nqcloses++] = conn;
        }
    }
}

// qprewrite is a placeholder hook that runs after processing and before
// writes.
inline
static void qprewrite(struct qthreadctx *ctx) {
    (void)ctx;
    // TODO: perform any prewrite operations
}

// qwrite flushes all queued outgoing data, batching through io_uring when
// available and falling back to plain write() otherwise.
inline
static void qwrite(struct qthreadctx *ctx) {
    // Flush all outgoing socket data.
#ifndef NOURING
    // NOTE(review): this gate tests nqreads, not nqouts — presumably the
    // same batch-size heuristic as qread, but confirm it is intentional.
    if (ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {
        // write outgoing using uring
        for (int i = 0; i < ctx->nqouts; i++) {
            struct net_conn *conn = ctx->qouts[i];
            struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);
            io_uring_prep_write(sqe, conn->fd, conn->out, conn->outlen, 0);
        }
        int ret = io_uring_submit(&ctx->ring);
        if (ret < 0) {
            errno = -ret;
            perror("# io_uring_submit");
            abort();
        }
        for (int i = 0; i < ctx->nqouts; i++) {
            struct io_uring_cqe *cqe;
            if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {
                perror("# io_uring_wait_cqe");
                abort();
            }
            struct net_conn *conn = ctx->qouts[i];
            ssize_t n = cqe->res;
            if (n == -EAGAIN) {
                // Treat "would block" as a zero-byte write; the remainder
                // is pushed out below by flush_conn.
                n = 0;
            }
            if (n < 0) {
                conn->closed = true;
            } else {
                // Any extra data must be flushed using syscall write.
                flush_conn(conn, n);
            }
            // Either everything was written or the socket is closed
            conn->outlen = 0;
            if (conn->closed) {
                ctx->qcloses[ctx->nqcloses++] = conn;
            }
            io_uring_cqe_seen(&ctx->ring, cqe);
        }
    } else {
#endif
        // Write data using write syscall
        for (int i = 0; i < ctx->nqouts; i++) {
            struct net_conn *conn = ctx->qouts[i];
            flush_conn(conn, 0);
            if (conn->closed) {
                ctx->qcloses[ctx->nqcloses++] = conn;
            }
        }
#ifndef NOURING
    }
#endif
}

// qclose tears down every connection queued for closing: runs the user
// close callback, shuts the socket down (via TLS close when wrapped),
// removes it from the fd map, and updates the connection counters.
inline
static void qclose(struct qthreadctx *ctx) {
    // Close all sockets that need to be closed
    for (int i = 0; i < ctx->nqcloses; i++) {
        struct net_conn *conn = ctx->qcloses[i];
        ctx->closed(conn, ctx->udata);
        if 
(conn->tls) {\n tls_close(conn->tls, conn->fd);\n ctx->ntlsconns--;\n } else {\n close(conn->fd);\n }\n cmap_delete(&ctx->cmap, conn);\n atomic_fetch_sub_explicit(&nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_sub_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n conn_free(conn);\n }\n}\n\nstatic void *qthread(void *arg) {\n struct qthreadctx *ctx = arg;\n#ifndef NOURING\n if (ctx->uring) {\n if (io_uring_queue_init(ctx->queuesize, &ctx->ring, 0) < 0) {\n perror(\"# io_uring_queue_init\");\n abort();\n }\n }\n#endif\n // connection map\n memset(&ctx->cmap, 0, sizeof(struct cmap));\n ctx->cmap.nbuckets = 64;\n size_t size = ctx->cmap.nbuckets*sizeof(struct net_conn*);\n ctx->cmap.buckets = xmalloc(size);\n memset(ctx->cmap.buckets, 0, ctx->cmap.nbuckets*sizeof(struct net_conn*));\n\n ctx->events = xmalloc(sizeof(event_t)*ctx->queuesize);\n ctx->qreads = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->inpkts = xmalloc(PACKETSIZE*ctx->queuesize);\n ctx->qins = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qinpkts = xmalloc(sizeof(char*)*ctx->queuesize);\n ctx->qinpktlens = xmalloc(sizeof(int)*ctx->queuesize);\n ctx->qcloses = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qouts = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qattachs = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n\n while (1) {\n sumstats_global(ctx);\n ctx->nevents = getevents(ctx->qfd, ctx->events, ctx->queuesize, 1, 0);\n if (ctx->nevents <= 0) {\n if (ctx->nevents == -1 && errno != EINTR) {\n perror(\"# getevents\");\n abort();\n }\n continue;\n }\n // reset, accept, attach, read, process, prewrite, write, close\n qreset(ctx); // reset the step queues\n qaccept(ctx); // accept incoming connections\n qattach(ctx); // attach bg workers. 
uncommon\n qread(ctx); // read from sockets\n qprocess(ctx); // process new socket data\n qprewrite(ctx); // perform any prewrite operations, such as fsync\n qwrite(ctx); // write to sockets\n qclose(ctx); // close any sockets that need closing\n }\n return 0;\n}\n\nstatic int listen_tcp(const char *host, const char *port, bool reuseport, \n int backlog)\n{\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return 0;\n }\n int ret;\n host = host ? host : \"127.0.0.1\";\n port = port ? port : \"0\";\n struct addrinfo hints = { 0 }, *addrs;\n hints.ai_family = AF_UNSPEC; \n hints.ai_socktype = SOCK_STREAM;\n hints.ai_protocol = IPPROTO_TCP;\n ret = getaddrinfo(host, port, &hints, &addrs);\n if (ret != 0) {\n fprintf(stderr, \"# getaddrinfo: %s: %s:%s\", gai_strerror(ret), host,\n port);\n abort();\n }\n struct addrinfo *ainfo = addrs;\n while (ainfo->ai_family != PF_INET) {\n ainfo = ainfo->ai_next;\n }\n assert(ainfo);\n int fd = socket(ainfo->ai_family, ainfo->ai_socktype, ainfo->ai_protocol);\n if (fd == -1) {\n perror(\"# socket(tcp)\");\n abort();\n }\n if (reuseport) {\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, \n sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseport)\");\n abort();\n }\n }\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &(int){1},sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseaddr)\");\n abort();\n }\n ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n ret = bind(fd, ainfo->ai_addr, ainfo->ai_addrlen);\n if (ret == -1) {\n fprintf(stderr, \"# bind(tcp): %s:%s\", host, port);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(tcp): %s:%s\", host, port);\n abort();\n }\n freeaddrinfo(addrs);\n return fd;\n}\n\nstatic int listen_unixsock(const char *unixsock, int backlog) {\n if (!unixsock || !*unixsock) {\n return 0;\n }\n struct sockaddr_un unaddr;\n int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n if (fd == -1) {\n 
        perror("# socket(unix)");
        abort();
    }
    memset(&unaddr, 0, sizeof(struct sockaddr_un));
    unaddr.sun_family = AF_UNIX;
    // sun_path is a fixed-size buffer; longer paths are truncated here.
    strncpy(unaddr.sun_path, unixsock, sizeof(unaddr.sun_path) - 1);
    int ret = setnonblock(fd);
    if (ret == -1) {
        perror("# setnonblock");
        abort();
    }
    // Remove any stale socket file left by a previous run before binding.
    unlink(unixsock);
    ret = bind(fd, (struct sockaddr *)&unaddr, sizeof(struct sockaddr_un));
    if (ret == -1) {
        fprintf(stderr, "# bind(unix): %s", unixsock);
        abort();
    }
    ret = listen(fd, backlog);
    if (ret == -1) {
        fprintf(stderr, "# listen(unix): %s", unixsock);
        abort();
    }
    return fd;
}

// Published pointer to the qthread context array (stored in net_main).
static atomic_uintptr_t all_ctxs = 0;

// current connections
size_t net_nconns(void) {
    return atomic_load_explicit(&nconns, __ATOMIC_ACQUIRE);
}

// total connections ever
size_t net_tconns(void) {
    return atomic_load_explicit(&tconns, __ATOMIC_ACQUIRE);
}

// total rejected connections ever
size_t net_rconns(void) {
    return atomic_load_explicit(&rconns, __ATOMIC_ACQUIRE);
}

// warmupunix opens nsocks short-lived client connections to the unix
// listener and pings each once, priming the event queues after startup.
// Failures are ignored; socks[i]==0 marks a connection that was never
// fully established.
static void warmupunix(const char *unixsock, int nsocks) {
    if (!unixsock || !*unixsock) {
        return;
    }
    int *socks = xmalloc(nsocks*sizeof(int));
    memset(socks, 0, nsocks*sizeof(int));
    for (int i = 0; i < nsocks; i++) {
        socks[i] = socket(AF_UNIX, SOCK_STREAM, 0);
        if (socks[i] == -1) {
            socks[i] = 0;
            continue;
        }
        struct sockaddr_un addr;
        memset(&addr, 0, sizeof(struct sockaddr_un));
        addr.sun_family = AF_UNIX;
        strncpy(addr.sun_path, unixsock, sizeof(addr.sun_path) - 1);
        if (connect(socks[i], (struct sockaddr *)&addr, 
            sizeof(struct sockaddr_un)) == -1)
        {
            close(socks[i]);
            socks[i] = 0;
            continue;
        }
        ssize_t n = write(socks[i], "+PING\r\n", 7);
        if (n == -1) {
            close(socks[i]);
            socks[i] = 0;
            continue;
        }
    }
    // Count and close the connections that made it through.
    int x = 0;
    for (int i = 0; i < nsocks; i++) {
        if (socks[i] > 0) {
            x++;
            close(socks[i]);
        }
    }
    if (verb > 1) {
        printf(". 
Warmup unix socket (%d/%d)\n", x, nsocks);
    }
    xfree(socks);
}


// warmuptcp opens nsocks short-lived client connections to the tcp
// listener and pings each once, priming the event queues after startup.
// Failures are ignored; socks[i]<=0 marks a connection that was never
// fully established.
static void warmuptcp(const char *host, const char *port, int nsocks) {
    if (!port || !*port || strcmp(port, "0") == 0) {
        return;
    }
    int *socks = xmalloc(nsocks*sizeof(int));
    memset(socks, 0, nsocks*sizeof(int));
    for (int i = 0; i < nsocks; i++) {
        struct addrinfo hints, *res;
        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_INET;
        hints.ai_socktype = SOCK_STREAM;
        int err = getaddrinfo(host, port, &hints, &res);
        if (err != 0) {
            continue;
        }
        socks[i] = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (socks[i] == -1) {
            freeaddrinfo(res);
            continue;
        }
        int ret = connect(socks[i], res->ai_addr, res->ai_addrlen);
        freeaddrinfo(res);
        if (ret == -1) {
            close(socks[i]);
            socks[i] = 0;
            continue;
        }
        ssize_t n = write(socks[i], "+PING\r\n", 7);
        if (n == -1) {
            close(socks[i]);
            socks[i] = 0;
            continue;
        }
    }
    // Count and close the connections that made it through.
    int x = 0;
    for (int i = 0; i < nsocks; i++) {
        if (socks[i] > 0) {
            x++;
            close(socks[i]);
        }
    }
    if (verb > 1) {
        printf(". 
Warmup tcp (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\nstatic void *thwarmup(void *arg) {\n // Perform a warmup of the epoll queues and listeners by making a quick\n // connection to each.\n struct net_opts *opts = arg;\n warmupunix(opts->unixsock, opts->nthreads*2);\n warmuptcp(opts->host, opts->port, opts->nthreads*2);\n return 0;\n}\n\nvoid net_main(struct net_opts *opts) {\n (void)delread;\n int sfd[3] = {\n listen_tcp(opts->host, opts->port, opts->reuseport, opts->backlog),\n listen_unixsock(opts->unixsock, opts->backlog),\n listen_tcp(opts->host, opts->tlsport, opts->reuseport, opts->backlog),\n };\n if (!sfd[0] && !sfd[1] && !sfd[2]) {\n printf(\"# No listeners provided\\n\");\n abort();\n }\n opts->listening(opts->udata);\n struct qthreadctx *ctxs = xmalloc(sizeof(struct qthreadctx)*opts->nthreads);\n memset(ctxs, 0, sizeof(struct qthreadctx)*opts->nthreads);\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n ctx->nthreads = opts->nthreads;\n ctx->tcpnodelay = opts->tcpnodelay;\n ctx->keepalive = opts->keepalive;\n ctx->quickack = opts->quickack;\n ctx->uring = !opts->nouring;\n ctx->ctxs = ctxs;\n ctx->index = i;\n ctx->maxconns = opts->maxconns;\n ctx->sfd = sfd;\n ctx->data = opts->data;\n ctx->udata = opts->udata;\n ctx->opened = opts->opened;\n ctx->closed = opts->closed;\n ctx->qfd = evqueue();\n if (ctx->qfd == -1) {\n perror(\"# evqueue\");\n abort();\n }\n atomic_init(&ctx->nconns, 0);\n for (int j = 0; j < 3; j++) {\n if (sfd[j]) {\n int ret = addread(ctx->qfd, sfd[j]);\n if (ret == -1) {\n perror(\"# addread\");\n abort();\n }\n }\n }\n ctx->unixsock = opts->unixsock;\n ctx->queuesize = opts->queuesize;\n }\n atomic_store(&all_ctxs, (uintptr_t)(void*)ctxs);\n opts->ready(opts->udata);\n if (!opts->nowarmup) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thwarmup, opts);\n if (ret != -1) {\n pthread_detach(th);\n }\n }\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n 
if (i == opts->nthreads-1) {\n qthread(ctx);\n } else {\n int ret = pthread_create(&ctx->th, 0, qthread, ctx);\n if (ret == -1) {\n perror(\"# pthread_create\");\n abort();\n }\n }\n }\n}\n\nstatic void *bgwork(void *arg) {\n struct bgworkctx *bgctx = arg;\n bgctx->work(bgctx->udata);\n // We are not in the same thread context as the event loop that owns this\n // connection. Adding the writer to the queue will allow for the loop\n // thread to gracefully continue the operation and then call the 'done'\n // callback.\n int ret = addwrite(bgctx->conn->ctx->qfd, bgctx->conn->fd);\n assert(ret == 0); (void)ret;\n return 0;\n}\n\n// net_conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool net_conn_bgwork(struct net_conn *conn, void (*work)(void *udata), \n void (*done)(struct net_conn *conn, void *udata), void *udata)\n{\n if (conn->bgctx || conn->closed) {\n return false;\n }\n struct qthreadctx *ctx = conn->ctx;\n int ret = delread(ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n conn->bgctx = xmalloc(sizeof(struct bgworkctx));\n memset(conn->bgctx, 0, sizeof(struct bgworkctx));\n conn->bgctx->conn = conn;\n conn->bgctx->done = done;\n conn->bgctx->work = work;\n conn->bgctx->udata = udata;\n pthread_t th;\n if (pthread_create(&th, 0, bgwork, conn->bgctx) == -1) {\n // Failed to create thread. 
Revert and return false.
        ret = addread(ctx->qfd, conn->fd);
        assert(ret == 0);
        xfree(conn->bgctx);
        conn->bgctx = 0;
        return false;
    } else {
        pthread_detach(th);
    }
    return true;
}

// net_conn_bgworking reports whether a background worker currently owns
// this connection.
bool net_conn_bgworking(struct net_conn *conn) {
    return conn->bgctx != 0;
}

// Per-connection stat increments; folded into thread/global counters by
// sumstats()/sumstats_global().
void net_stat_cmd_get_incr(struct net_conn *conn) {
    conn->stat_cmd_get++;
}

void net_stat_cmd_set_incr(struct net_conn *conn) {
    conn->stat_cmd_set++;
}

void net_stat_get_hits_incr(struct net_conn *conn) {
    conn->stat_get_hits++;
}

void net_stat_get_misses_incr(struct net_conn *conn) {
    conn->stat_get_misses++;
}

// net_conn_istls reports whether the connection is TLS-wrapped.
bool net_conn_istls(struct net_conn *conn) {
    return conn->tls != 0;
}
"], ["/pogocache/src/main.c", "// https://github.com/tidwall/pogocache
//
// Copyright 2025 Polypoint Labs, LLC. All rights reserved.
// This file is part of the Pogocache project.
// Use of this source code is governed by the AGPL that can be found in
// the LICENSE file.
//
// For alternative licensing options or general questions, please contact
// us at licensing@polypointlabs.com.
//
// Unit main.c is the main entry point for the Pogocache program.
// NOTE(review): the system #include directives below lost their <header>
// names when this source was exported; recover them from the repository.
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include "net.h"
#include "conn.h"
#include "sys.h"
#include "cmds.h"
#include "save.h"
#include "xmalloc.h"
#include "util.h"
#include "tls.h"
#include "pogocache.h"
#include "gitinfo.h"
#include "uring.h"

// default user flags
int nthreads = 0;          // number of client threads
char *port = "9401";       // default tcp port (non-tls)
char *host = "127.0.0.1";  // default hostname or ip address
char *persist = "";        // file to load and save data to
char *unixsock = "";       // use a unix socket
char *reuseport = "no";    // reuse tcp port for other programs
char *tcpnodelay = "yes";  // disable nagle's algorithm
char *quickack = "no";     // enable 
quick acks\nchar *usecas = \"no\"; // enable compare and store\nchar *keepalive = \"yes\"; // socket keepalive setting\nint backlog = 1024; // network socket accept backlog\nint queuesize = 128; // event queue size\nchar *maxmemory = \"80%\"; // Maximum memory allowed - 80% total system\nchar *evict = \"yes\"; // evict keys when maxmemory reached\nint loadfactor = 75; // hashmap load factor\nchar *keysixpack = \"yes\"; // use sixpack compression on keys\nchar *trackallocs = \"no\"; // track allocations (for debugging)\nchar *auth = \"\"; // auth token or pa\nchar *tlsport = \"\"; // enable tls over tcp port\nchar *tlscertfile = \"\"; // tls cert file\nchar *tlskeyfile = \"\"; // tls key file\nchar *tlscacertfile = \"\"; // tls ca cert file\nchar *uring = \"yes\"; // use uring (linux only)\nint maxconns = 1024; // maximum number of sockets\nchar *noticker = \"no\";\nchar *warmup = \"yes\";\n\n// Global variables calculated in main().\n// These should never change during the lifetime of the process.\n// Other source files must use the \"extern const\" specifier.\nchar *version;\nchar *githash;\nuint64_t seed;\nsize_t sysmem;\nsize_t memlimit;\nint verb; // verbosity, 0=no, 1=verbose, 2=very, 3=extremely\nbool usesixpack;\nint useallocator;\nbool usetrackallocs;\nbool useevict;\nint nshards;\nbool usetls; // use tls security (pemfile required);\nbool useauth; // use auth password\nbool usecolor; // allow color in terminal\nchar *useid; // instance id (unique to every process run)\nint64_t procstart; // proc start boot time, for uptime stat\n\n// Global atomic variable. 
These are safe to read and modify by other source\n// files, as long as those sources use \"atomic_\" methods.\natomic_int shutdownreq; // shutdown request counter\natomic_int_fast64_t flush_delay; // delay in seconds to next async flushall\natomic_bool sweep; // mark for async sweep, asap\natomic_bool registered; // registration is active\natomic_bool lowmem; // system is in low memory mode.\n\nstruct pogocache *cache;\n\n// min max robinhood load factor (75% performs pretty well)\n#define MINLOADFACTOR_RH 55\n#define MAXLOADFACTOR_RH 95\n\nstatic void ready(void *udata) {\n (void)udata;\n printf(\"* Ready to accept connections\\n\");\n}\n\n#define noopt \"%s\"\n\n#define HELP(format, ...) \\\n fprintf(file, format, ##__VA_ARGS__)\n\n#define HOPT(opt, desc, format, ...) \\\n fprintf(file, \" \"); \\\n fprintf(file, \"%-22s \", opt); \\\n fprintf(file, \"%-30s \", desc); \\\n if (strcmp(format, noopt) != 0) { \\\n fprintf(file, \"(default: \" format \")\", ##__VA_ARGS__); \\\n } \\\n fprintf(file, \"\\n\");\n\nstatic int calc_nshards(int nprocs) {\n switch (nprocs) {\n case 1: return 64;\n case 2: return 128;\n case 3: return 256;\n case 4: return 512;\n case 5: return 1024;\n case 6: return 2048;\n default: return 4096;\n }\n}\n\nstatic void showhelp(FILE *file) {\n int nprocs = sys_nprocs();\n int nshards = calc_nshards(nprocs);\n\n HELP(\"Usage: %s [options]\\n\", \"pogocache\");\n HELP(\"\\n\");\n\n HELP(\"Basic options:\\n\");\n HOPT(\"-h hostname\", \"listening host\", \"%s\", host);\n HOPT(\"-p port\", \"listening port\", \"%s\", port);\n HOPT(\"-s socket\", \"unix socket file\", \"%s\", *unixsock?unixsock:\"none\");\n\n HOPT(\"-v,-vv,-vvv\", \"verbose logging level\", noopt, \"\");\n HELP(\"\\n\");\n \n HELP(\"Additional options:\\n\");\n HOPT(\"--threads count\", \"number of threads\", \"%d\", nprocs);\n HOPT(\"--maxmemory value\", \"set max memory usage\", \"%s\", maxmemory);\n HOPT(\"--evict yes/no\", \"evict keys at maxmemory\", \"%s\", evict);\n 
HOPT(\"--persist path\", \"persistence file\", \"%s\", *persist?persist:\"none\");\n HOPT(\"--maxconns conns\", \"maximum connections\", \"%d\", maxconns);\n HELP(\"\\n\");\n \n HELP(\"Security options:\\n\");\n HOPT(\"--auth passwd\", \"auth token or password\", \"%s\", *auth?auth:\"none\");\n#ifndef NOOPENSSL\n HOPT(\"--tlsport port\", \"enable tls on port\", \"%s\", \"none\");\n HOPT(\"--tlscert certfile\", \"tls cert file\", \"%s\", \"none\");\n HOPT(\"--tlskey keyfile\", \"tls key file\", \"%s\", \"none\");\n HOPT(\"--tlscacert cacertfile\", \"tls ca-cert file\", \"%s\", \"none\");\n#endif\n HELP(\"\\n\");\n\n HELP(\"Advanced options:\\n\");\n HOPT(\"--shards count\", \"number of shards\", \"%d\", nshards);\n HOPT(\"--backlog count\", \"accept backlog\", \"%d\", backlog);\n HOPT(\"--queuesize count\", \"event queuesize size\", \"%d\", queuesize);\n HOPT(\"--reuseport yes/no\", \"reuseport for tcp\", \"%s\", reuseport);\n HOPT(\"--tcpnodelay yes/no\", \"disable nagle's algo\", \"%s\", tcpnodelay);\n HOPT(\"--quickack yes/no\", \"use quickack (linux)\", \"%s\", quickack);\n HOPT(\"--uring yes/no\", \"use uring (linux)\", \"%s\", uring);\n HOPT(\"--loadfactor percent\", \"hashmap load factor\", \"%d\", loadfactor);\n HOPT(\"--keysixpack yes/no\", \"sixpack compress keys\", \"%s\", keysixpack);\n HOPT(\"--cas yes/no\", \"use compare and store\", \"%s\", usecas);\n HELP(\"\\n\");\n}\n\nstatic void showversion(FILE *file) {\n#ifdef CCSANI\n fprintf(file, \"pogocache %s (CCSANI)\\n\", version);\n#else\n fprintf(file, \"pogocache %s\\n\", version);\n#endif\n}\n\nstatic size_t calc_memlimit(char *maxmemory) {\n if (strcmp(maxmemory, \"unlimited\") == 0) {\n return SIZE_MAX;\n }\n char *oval = maxmemory;\n while (isspace(*maxmemory)) {\n maxmemory++;\n }\n char *end;\n errno = 0;\n double mem = strtod(maxmemory, &end);\n if (errno || !(mem > 0) || !isfinite(mem)) {\n goto fail;\n }\n while (isspace(*end)) {\n end++;\n }\n #define exteq(c) \\\n (tolower(end[0])==c&& 
(!end[1]||(tolower(end[1])=='b'&&!end[2])))\n\n if (strcmp(end, \"\") == 0) {\n return mem;\n } else if (strcmp(end, \"%\") == 0) {\n return (((double)mem)/100.0) * sysmem;\n } else if (exteq('k')) {\n return mem*1024.0;\n } else if (exteq('m')) {\n return mem*1024.0*1024.0;\n } else if (exteq('g')) {\n return mem*1024.0*1024.0*1024.0;\n } else if (exteq('t')) {\n return mem*1024.0*1024.0*1024.0*1024.0;\n }\nfail:\n fprintf(stderr, \"# Invalid maxmemory '%s'\\n\", oval);\n showhelp(stderr);\n exit(1);\n}\n\nstatic size_t setmaxrlimit(void) {\n size_t maxconns = 0;\n struct rlimit rl;\n if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {\n maxconns = rl.rlim_max;\n rl.rlim_cur = rl.rlim_max;\n rl.rlim_max = rl.rlim_max;\n if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {\n perror(\"# setrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n } else {\n perror(\"# getrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n return maxconns;\n}\n\nstatic void evicted(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)value, (void)valuelen, (void)expires, (void)udata;\n return;\n printf(\". 
evicted shard=%d, reason=%d, time=%\" PRIi64 \", key='%.*s'\"\n \", flags=%\" PRIu32 \", cas=%\" PRIu64 \"\\n\",\n shard, reason, time, (int)keylen, (char*)key, flags, cas);\n}\n\n#define BEGIN_FLAGS() \\\n if (0) {\n#define BFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option %s missing value\\n\", opt); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n }\n#define TFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n if (!dryrun) { \\\n op; \\\n }\n#define AFLAG(name, op) \\\n } else if (strcmp(argv[i], \"--\" name) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option --%s missing value\\n\", name); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n } \\\n } else if (strstr(argv[i], \"--\" name \"=\") == argv[i]) { \\\n if (!dryrun) { \\\n char *flag = argv[i]+strlen(name)+3; op; \\\n }\n#define END_FLAGS() \\\n } else { \\\n fprintf(stderr, \"# Unknown program option %s\\n\", argv[i]); \\\n exit(1); \\\n }\n\n#define INVALID_FLAG(name, value) \\\n fprintf(stderr, \"# Option --%s is invalid\\n\", name); \\\n exit(1);\n\nstatic atomic_bool loaded = false;\n\nvoid sigterm(int sig) {\n if (sig == SIGINT || sig == SIGTERM) {\n if (!atomic_load(&loaded) || !*persist) {\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n if (*persist) {\n printf(\"* Saving data to %s, please wait...\\n\", persist);\n int ret = save(persist, true);\n if (ret != 0) {\n perror(\"# Save failed\");\n _Exit(1);\n }\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n\n int count = atomic_fetch_add(&shutdownreq, 1);\n if (count > 0 && sig == SIGINT) {\n printf(\"# User forced shutdown\\n\");\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n }\n}\n\nstatic void tick(void) {\n if (!atomic_load_explicit(&loaded, __ATOMIC_ACQUIRE)) {\n return;\n }\n // Memory usage check\n if (memlimit < SIZE_MAX) {\n struct sys_meminfo 
meminfo;\n sys_getmeminfo(&meminfo);\n size_t memusage = meminfo.rss;\n if (!lowmem) {\n if (memusage > memlimit) {\n atomic_store(&lowmem, true);\n if (verb > 0) {\n printf(\"# Low memory mode on\\n\");\n }\n }\n } else {\n if (memusage < memlimit) {\n atomic_store(&lowmem, false);\n if (verb > 0) {\n printf(\"# Low memory mode off\\n\");\n }\n }\n }\n }\n\n // Print allocations to terminal.\n if (usetrackallocs) {\n printf(\". keys=%zu, allocs=%zu, conns=%zu\\n\",\n pogocache_count(cache, 0), xallocs(), net_nconns());\n }\n\n}\n\nstatic void *ticker(void *arg) {\n (void)arg;\n while (1) {\n tick();\n sleep(1);\n }\n return 0;\n}\n\nstatic void listening(void *udata) {\n (void)udata;\n printf(\"* Network listener established\\n\");\n if (*persist) {\n if (!cleanwork(persist)) {\n // An error message has already been printed\n _Exit(0);\n }\n if (access(persist, F_OK) == 0) {\n printf(\"* Loading data from %s, please wait...\\n\", persist);\n struct load_stats stats;\n int64_t start = sys_now();\n int ret = load(persist, true, &stats);\n if (ret != 0) {\n perror(\"# Load failed\");\n _Exit(1);\n }\n double elapsed = (sys_now()-start)/1e9;\n printf(\"* Loaded %zu entries (%zu expired) (%.3f MB in %.3f secs) \"\n \"(%.0f entries/sec, %.0f MB/sec) \\n\", \n stats.ninserted, stats.nexpired,\n stats.csize/1024.0/1024.0, elapsed, \n (stats.ninserted+stats.nexpired)/elapsed, \n stats.csize/1024.0/1024.0/elapsed);\n }\n }\n atomic_store(&loaded, true);\n}\n\nstatic void yield(void *udata) {\n (void)udata;\n sched_yield();\n}\n\nint main(int argc, char *argv[]) {\n procstart = sys_now();\n\n // Intercept signals\n signal(SIGPIPE, SIG_IGN);\n signal(SIGINT, sigterm);\n signal(SIGTERM, sigterm);\n\n // Line buffer logging so pipes will stream.\n setvbuf(stdout, 0, _IOLBF, 0);\n setvbuf(stderr, 0, _IOLBF, 0);\n char guseid[17];\n memset(guseid, 0, 17);\n useid = guseid;\n sys_genuseid(useid); \n const char *maxmemorymb = 0;\n seed = sys_seed();\n verb = 0;\n usetls = false;\n 
useauth = false;\n lowmem = false;\n version = GITVERS;\n githash = GITHASH;\n\n \n\n\n if (uring_available()) {\n uring = \"yes\";\n } else {\n uring = \"no\";\n }\n\n atomic_init(&shutdownreq, 0);\n atomic_init(&flush_delay, 0);\n atomic_init(&sweep, false);\n atomic_init(®istered, false);\n\n // Parse program flags\n for (int ii = 0; ii < 2; ii++) {\n bool dryrun = ii == 0;\n for (int i = 1; i < argc; i++) {\n if (strcmp(argv[i], \"--help\") == 0) {\n showhelp(stdout);\n exit(0);\n }\n if (strcmp(argv[i], \"--version\") == 0) {\n showversion(stdout);\n exit(0);\n }\n BEGIN_FLAGS()\n BFLAG(\"-p\", port = flag)\n BFLAG(\"-h\", host = flag)\n BFLAG(\"-s\", unixsock = flag)\n TFLAG(\"-v\", verb = 1)\n TFLAG(\"-vv\", verb = 2)\n TFLAG(\"-vvv\", verb = 3)\n AFLAG(\"port\", port = flag)\n AFLAG(\"threads\", nthreads = atoi(flag))\n AFLAG(\"shards\", nshards = atoi(flag))\n AFLAG(\"backlog\", backlog = atoi(flag))\n AFLAG(\"queuesize\", queuesize = atoi(flag))\n AFLAG(\"maxmemory\", maxmemory = flag)\n AFLAG(\"evict\", evict = flag)\n AFLAG(\"reuseport\", reuseport = flag)\n AFLAG(\"uring\", uring = flag)\n AFLAG(\"tcpnodelay\", tcpnodelay = flag)\n AFLAG(\"keepalive\", keepalive = flag)\n AFLAG(\"quickack\", quickack = flag)\n AFLAG(\"trackallocs\", trackallocs = flag)\n AFLAG(\"cas\", usecas = flag)\n AFLAG(\"maxconns\", maxconns = atoi(flag))\n AFLAG(\"loadfactor\", loadfactor = atoi(flag))\n AFLAG(\"sixpack\", keysixpack = flag)\n AFLAG(\"seed\", seed = strtoull(flag, 0, 10))\n AFLAG(\"auth\", auth = flag)\n AFLAG(\"persist\", persist = flag)\n AFLAG(\"noticker\", noticker = flag)\n AFLAG(\"warmup\", warmup = flag)\n#ifndef NOOPENSSL\n // TLS flags\n AFLAG(\"tlsport\", tlsport = flag)\n AFLAG(\"tlscert\", tlscertfile = flag)\n AFLAG(\"tlscacert\", tlscacertfile = flag)\n AFLAG(\"tlskey\", tlskeyfile = flag)\n#endif\n // Hidden or alternative flags\n BFLAG(\"-t\", nthreads = atoi(flag)) // --threads=\n BFLAG(\"-m\", maxmemorymb = flag) // --maxmemory=M\n 
TFLAG(\"-M\", evict = \"no\") // --evict=no\n END_FLAGS()\n }\n }\n\n usecolor = isatty(fileno(stdout));\n\n if (strcmp(evict, \"yes\") == 0) {\n useevict = true;\n } else if (strcmp(evict, \"no\") == 0) {\n useevict = false;\n } else {\n INVALID_FLAG(\"evict\", evict);\n }\n\n bool usereuseport;\n if (strcmp(reuseport, \"yes\") == 0) {\n usereuseport = true;\n } else if (strcmp(reuseport, \"no\") == 0) {\n usereuseport = false;\n } else {\n INVALID_FLAG(\"reuseport\", reuseport);\n }\n\n if (strcmp(trackallocs, \"yes\") == 0) {\n usetrackallocs = true;\n } else if (strcmp(trackallocs, \"no\") == 0) {\n usetrackallocs = false;\n } else {\n INVALID_FLAG(\"trackallocs\", trackallocs);\n }\n\n bool usetcpnodelay;\n if (strcmp(tcpnodelay, \"yes\") == 0) {\n usetcpnodelay = true;\n } else if (strcmp(tcpnodelay, \"no\") == 0) {\n usetcpnodelay = false;\n } else {\n INVALID_FLAG(\"tcpnodelay\", tcpnodelay);\n }\n\n bool usekeepalive;\n if (strcmp(keepalive, \"yes\") == 0) {\n usekeepalive = true;\n } else if (strcmp(keepalive, \"no\") == 0) {\n usekeepalive = false;\n } else {\n INVALID_FLAG(\"keepalive\", keepalive);\n }\n\n\n bool usecasflag;\n if (strcmp(usecas, \"yes\") == 0) {\n usecasflag = true;\n } else if (strcmp(usecas, \"no\") == 0) {\n usecasflag = false;\n } else {\n INVALID_FLAG(\"usecas\", usecas);\n }\n\n if (maxconns <= 0) {\n maxconns = 1024;\n }\n\n\n#ifndef __linux__\n bool useuring = false;\n#else\n bool useuring;\n if (strcmp(uring, \"yes\") == 0) {\n useuring = true;\n } else if (strcmp(uring, \"no\") == 0) {\n useuring = false;\n } else {\n INVALID_FLAG(\"uring\", uring);\n }\n if (useuring) {\n if (!uring_available()) {\n useuring = false;\n }\n }\n#endif\n\n#ifndef __linux__\n quickack = \"no\";\n#endif\n bool usequickack;\n if (strcmp(quickack, \"yes\") == 0) {\n usequickack = true;\n } else if (strcmp(quickack, \"no\") == 0) {\n usequickack = false;\n } else {\n INVALID_FLAG(\"quickack\", quickack);\n }\n\n if (strcmp(keysixpack, \"yes\") == 0) 
{\n usesixpack = true;\n } else if (strcmp(keysixpack, \"no\") == 0) {\n usesixpack = false;\n } else {\n INVALID_FLAG(\"sixpack\", keysixpack);\n }\n\n // Threads\n if (nthreads <= 0) {\n nthreads = sys_nprocs();\n } else if (nthreads > 4096) {\n nthreads = 4096; \n }\n\n if (nshards == 0) {\n nshards = calc_nshards(nthreads);\n }\n if (nshards <= 0 || nshards > 65536) {\n nshards = 65536;\n }\n\n if (loadfactor < MINLOADFACTOR_RH) {\n loadfactor = MINLOADFACTOR_RH;\n printf(\"# loadfactor minumum set to %d\\n\", MINLOADFACTOR_RH);\n } else if (loadfactor > MAXLOADFACTOR_RH) {\n loadfactor = MAXLOADFACTOR_RH;\n printf(\"# loadfactor maximum set to %d\\n\", MAXLOADFACTOR_RH);\n }\n\n if (queuesize < 1) {\n queuesize = 1;\n printf(\"# queuesize adjusted to 1\\n\");\n } else if (queuesize > 4096) {\n queuesize = 4096;\n printf(\"# queuesize adjusted to 4096\\n\");\n }\n\n if (maxmemorymb) {\n size_t sz = strlen(maxmemorymb)+2;\n char *str = xmalloc(sz);\n snprintf(str, sz, \"%sM\", maxmemorymb);\n maxmemory = str;\n }\n\n if (!*port || strcmp(port, \"0\") == 0) {\n port = \"\";\n }\n\n if (!*tlsport || strcmp(tlsport, \"0\") == 0) {\n usetls = false;\n tlsport = \"\";\n } else {\n usetls = true;\n tls_init();\n }\n\n if (*auth) {\n useauth = true;\n }\n setmaxrlimit();\n sysmem = sys_memory();\n memlimit = calc_memlimit(maxmemory);\n\n if (memlimit == SIZE_MAX) {\n evict = \"no\";\n useevict = false;\n }\n\n struct pogocache_opts opts = {\n .yield = yield,\n .seed = seed,\n .malloc = xmalloc,\n .free = xfree,\n .nshards = nshards,\n .loadfactor = loadfactor,\n .usecas = usecasflag,\n .evicted = evicted,\n .allowshrink = true,\n .usethreadbatch = true,\n };\n // opts.yield = 0;\n\n cache = pogocache_new(&opts);\n if (!cache) {\n perror(\"pogocache_new\");\n abort();\n }\n\n // Print the program details\n printf(\"* Pogocache (pid: %d, arch: %s%s, version: %s, git: %s)\\n\",\n getpid(), sys_arch(), sizeof(uintptr_t)==4?\", mode: 32-bit\":\"\", version,\n githash);\n 
char buf0[64], buf1[64];\n char buf2[64];\n if (memlimit < SIZE_MAX) {\n snprintf(buf2, sizeof(buf2), \"%.0f%%/%s\", (double)memlimit/sysmem*100.0,\n memstr(memlimit, buf1));\n } else {\n strcpy(buf2, \"unlimited\");\n }\n printf(\"* Memory (system: %s, max: %s, evict: %s)\\n\", memstr(sysmem, buf0),\n buf2, evict);\n printf(\"* Features (verbosity: %s, sixpack: %s, cas: %s, persist: %s, \"\n \"uring: %s)\\n\",\n verb==0?\"normal\":verb==1?\"verbose\":verb==2?\"very\":\"extremely\",\n keysixpack, usecas, *persist?persist:\"none\", useuring?\"yes\":\"no\");\n char tcp_addr[256];\n snprintf(tcp_addr, sizeof(tcp_addr), \"%s:%s\", host, port);\n printf(\"* Network (port: %s, unixsocket: %s, backlog: %d, reuseport: %s, \"\n \"maxconns: %d)\\n\", *port?port:\"none\", *unixsock?unixsock:\"none\",\n backlog, reuseport, maxconns);\n printf(\"* Socket (tcpnodelay: %s, keepalive: %s, quickack: %s)\\n\",\n tcpnodelay, keepalive, quickack);\n printf(\"* Threads (threads: %d, queuesize: %d)\\n\", nthreads, queuesize);\n printf(\"* Shards (shards: %d, loadfactor: %d%%)\\n\", nshards, loadfactor);\n printf(\"* Security (auth: %s, tlsport: %s)\\n\", \n strlen(auth)>0?\"enabled\":\"disabled\", *tlsport?tlsport:\"none\");\n if (strcmp(noticker,\"yes\") == 0) {\n printf(\"# NO TICKER\\n\");\n } else {\n pthread_t th;\n int ret = pthread_create(&th, 0, ticker, 0);\n if (ret == -1) {\n perror(\"# pthread_create(ticker)\");\n exit(1);\n }\n }\n#ifdef DATASETOK\n printf(\"# DATASETOK\\n\");\n#endif\n#ifdef CMDGETNIL\n printf(\"# CMDGETNIL\\n\");\n#endif\n#ifdef CMDSETOK\n printf(\"# CMDSETOK\\n\");\n#endif\n#ifdef ENABLELOADREAD\n printf(\"# ENABLELOADREAD\\n\");\n#endif\n struct net_opts nopts = {\n .host = host,\n .port = port,\n .tlsport = tlsport,\n .unixsock = unixsock,\n .reuseport = usereuseport,\n .tcpnodelay = usetcpnodelay,\n .keepalive = usekeepalive,\n .quickack = usequickack,\n .backlog = backlog,\n .queuesize = queuesize,\n .nthreads = nthreads,\n .nowarmup = strcmp(warmup, 
\"no\") == 0,\n .nouring = !useuring,\n .listening = listening,\n .ready = ready,\n .data = evdata,\n .opened = evopened,\n .closed = evclosed,\n .maxconns = maxconns,\n };\n net_main(&nopts);\n return 0;\n}\n"], ["/pogocache/src/postgres.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit postgres.c provides the parser for the Postgres wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n\n// #define PGDEBUG\n\n#define TEXTOID 25\n#define BYTEAOID 17\n\nextern const char *version;\nextern const char *auth;\n\n#ifdef PGDEBUG\n#define dprintf printf\n#else\n#define dprintf(...)\n#endif\n\nstatic void print_packet(const char *data, size_t len) {\n dprintf(\". PACKET=%03zu [ \", len);\n for (size_t i = 0; i < len; i++) {\n printf(\"%02X \", (unsigned char)data[i]);\n }\n dprintf(\"]\\n\");\n dprintf(\". 
[\");\n for (size_t i = 0; i < len; i++) {\n unsigned char ch = data[i];\n if (ch < ' ') {\n ch = '?';\n }\n dprintf(\"%c\", ch);\n }\n dprintf(\"]\\n\");\n}\n\nstatic int32_t read_i32(const char *data) {\n return ((uint32_t)(uint8_t)data[0] << 24) |\n ((uint32_t)(uint8_t)data[1] << 16) |\n ((uint32_t)(uint8_t)data[2] << 8) |\n ((uint32_t)(uint8_t)data[3] << 0);\n}\n\nstatic void write_i32(char *data, int32_t x) {\n data[0] = (uint8_t)(((uint32_t)x) >> 24) & 0xFF;\n data[1] = (uint8_t)(((uint32_t)x) >> 16) & 0xFF;\n data[2] = (uint8_t)(((uint32_t)x) >> 8) & 0xFF;\n data[3] = (uint8_t)(((uint32_t)x) >> 0) & 0xFF;\n}\n\nstatic int16_t read_i16(const char *data) {\n return ((uint16_t)(uint8_t)data[0] << 8) |\n ((uint16_t)(uint8_t)data[1] << 0);\n}\nstatic void write_i16(char *data, int16_t x) {\n data[0] = (uint8_t)(((uint16_t)x) >> 8) & 0xFF;\n data[1] = (uint8_t)(((uint16_t)x) >> 0) & 0xFF;\n}\n\n// parse_begin is called to begin parsing a client message.\n#define parse_begin() \\\n const char *p = data; \\\n const char *e = p+len; \\\n (void)args, (void)pg, (void)e;\n\n// parse_end is called when parsing client message is complete.\n// This will check that the position of the client stream matches the\n// expected lenght provided by the client. 
\n#define parse_end() \\\n if ((size_t)(p-data) != len) { \\\n return -1; \\\n }\n\n#define parse_cstr() ({ \\\n const char *cstr = 0; \\\n const char *s = p; \\\n while (p < e) { \\\n if (*p == '\\0') { \\\n cstr = s; \\\n p++; \\\n break; \\\n } \\\n p++; \\\n } \\\n if (!cstr) { \\\n return -1; \\\n } \\\n cstr; \\\n}) \n\n#define parse_int16() ({ \\\n if (e-p < 2) { \\\n return -1; \\\n } \\\n int16_t x = read_i16(p); \\\n p += 2; \\\n x; \\\n})\n\n#define parse_byte() ({ \\\n if (e-p < 1) { \\\n return -1; \\\n } \\\n uint8_t x = *p; \\\n p += 1; \\\n x; \\\n})\n\n#define parse_int32() ({ \\\n if (e-p < 4) { \\\n return -1; \\\n } \\\n int32_t x = read_i32(p); \\\n p += 4; \\\n x; \\\n})\n\n#define parse_bytes(n) ({ \\\n if (e-p < n) { \\\n return -1; \\\n } \\\n const void *s = p; \\\n p += (n); \\\n s; \\\n})\n\nstatic void arg_append_unescape_simplestr(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n char *str2 = xmalloc(slen+1);\n for (size_t i = 0; i < str2len; i++) {\n if (str[i] == '\\'' && str[i+1] == '\\'') {\n i++;\n }\n str2[str2len++] = str[i];\n }\n args_append(args, str2, str2len, false);\n xfree(str2);\n}\n\nstatic void pg_statement_free(struct pg_statement *statement) {\n args_free(&statement->args);\n buf_clear(&statement->argtypes);\n}\n\n\nstatic void pg_portal_free(struct pg_portal *portal) {\n args_free(&portal->params);\n}\n\nstatic void statments_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n pg_statement_free(&statement);\n }\n hashmap_free(map);\n}\n\nstatic void portals_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n pg_portal_free(&portal);\n }\n hashmap_free(map);\n}\n\nstruct pg 
*pg_new(void) {\n struct pg *pg = xmalloc(sizeof(struct pg));\n memset(pg, 0, sizeof(struct pg));\n pg->oid = TEXTOID;\n return pg;\n}\n\nvoid pg_free(struct pg *pg) {\n if (!pg) {\n return;\n }\n xfree(pg->application_name);\n xfree(pg->database);\n xfree(pg->user);\n buf_clear(&pg->buf);\n statments_free(pg->statements);\n portals_free(pg->portals);\n args_free(&pg->targs);\n // args_free(&pg->xargs);\n xfree(pg->desc);\n xfree(pg);\n}\n\nstatic uint64_t pg_statement_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n return hashmap_murmur(statement.name, strlen(statement.name), seed0, seed1);\n}\n\nstatic uint64_t pg_portal_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n return hashmap_murmur(portal.name, strlen(portal.name), seed0, seed1);\n}\n\nstatic int pg_statement_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_statement stmta;\n memcpy(&stmta, a, sizeof(struct pg_statement));\n struct pg_statement stmtb;\n memcpy(&stmtb, b, sizeof(struct pg_statement));\n return strcmp(stmta.name, stmtb.name);\n}\n\nstatic int pg_portal_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_portal portala;\n memcpy(&portala, a, sizeof(struct pg_portal));\n struct pg_portal portalb;\n memcpy(&portalb, b, sizeof(struct pg_portal));\n return strcmp(portala.name, portalb.name);\n}\n\nstatic void portal_insert(struct pg *pg, struct pg_portal *portal) {\n (void)portal;\n if (!pg->portals) {\n pg->portals = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_portal), 0, 0, 0, pg_portal_hash, \n pg_portal_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->portals, portal);\n if (ptr) {\n struct pg_portal old;\n memcpy(&old, ptr, sizeof(struct pg_portal));\n pg_portal_free(&old);\n }\n}\n\nstatic void 
statement_insert(struct pg *pg, struct pg_statement *stmt) {\n if (!pg->statements) {\n pg->statements = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_statement), 0, 0, 0, pg_statement_hash, \n pg_statement_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->statements, stmt);\n if (ptr) {\n struct pg_statement old;\n memcpy(&old, ptr, sizeof(struct pg_statement));\n pg_statement_free(&old);\n }\n}\n\nstatic bool statement_get(struct pg *pg, const char *name, \n struct pg_statement *stmt)\n{\n if (!pg->statements) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_statement key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->statements, &key);\n if (!ptr) {\n return false;\n }\n memcpy(stmt, ptr, sizeof(struct pg_statement));\n return true;\n}\n\nstatic bool portal_get(struct pg *pg, const char *name, \n struct pg_portal *portal)\n{\n if (!pg->portals) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_portal key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->portals, &key);\n if (!ptr) {\n return false;\n }\n memcpy(portal, ptr, sizeof(struct pg_portal));\n return true;\n}\n\nstatic const uint8_t hextoks[256] = { \n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,\n 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n};\n\nstatic uint32_t decode_hex(const uint8_t *str) {\n return (((int)hextoks[str[0]])<<12) | (((int)hextoks[str[1]])<<8) |\n (((int)hextoks[str[2]])<<4) | (((int)hextoks[str[3]])<<0);\n}\n\nstatic bool is_surrogate(uint32_t cp) {\n return cp > 55296 && cp < 57344;\n}\n\nstatic uint32_t decode_codepoint(uint32_t cp1, uint32_t cp2) {\n return cp1 > 55296 && cp1 < 56320 && cp2 
> 56320 && cp2 < 57344 ?\n ((cp1 - 55296) << 10) | ((cp2 - 56320) + 65536) :\n 65533;\n}\n\nstatic inline int encode_codepoint(uint8_t dst[], uint32_t cp) {\n if (cp < 128) {\n dst[0] = cp;\n return 1;\n } else if (cp < 2048) {\n dst[0] = 192 | (cp >> 6);\n dst[1] = 128 | (cp & 63);\n return 2;\n } else if (cp > 1114111 || is_surrogate(cp)) {\n cp = 65533; // error codepoint\n }\n if (cp < 65536) {\n dst[0] = 224 | (cp >> 12);\n dst[1] = 128 | ((cp >> 6) & 63);\n dst[2] = 128 | (cp & 63);\n return 3;\n }\n dst[0] = 240 | (cp >> 18);\n dst[1] = 128 | ((cp >> 12) & 63);\n dst[2] = 128 | ((cp >> 6) & 63);\n dst[3] = 128 | (cp & 63);\n return 4;\n}\n\n// for_each_utf8 iterates over each UTF-8 bytes in jstr, unescaping along the\n// way. 'f' is a loop expression that will make available the 'ch' char which \n// is just a single byte in a UTF-8 series.\n// this is taken from https://github.com/tidwall/json.c\n#define for_each_utf8(jstr, len, f) { \\\n size_t nn = (len); \\\n int ch = 0; \\\n (void)ch; \\\n for (size_t ii = 0; ii < nn; ii++) { \\\n if ((jstr)[ii] != '\\\\') { \\\n ch = (jstr)[ii]; \\\n if (1) f \\\n continue; \\\n }; \\\n ii++; \\\n if (ii == nn) break; \\\n switch ((jstr)[ii]) { \\\n case '\\\\': ch = '\\\\'; break; \\\n case '/' : ch = '/'; break; \\\n case 'b' : ch = '\\b'; break; \\\n case 'f' : ch = '\\f'; break; \\\n case 'n' : ch = '\\n'; break; \\\n case 'r' : ch = '\\r'; break; \\\n case 't' : ch = '\\t'; break; \\\n case '\"' : ch = '\"'; break; \\\n case 'u' : \\\n if (ii+5 > nn) { nn = 0; continue; }; \\\n uint32_t cp = decode_hex((jstr)+ii+1); \\\n ii += 5; \\\n if (is_surrogate(cp)) { \\\n if (nn-ii >= 6 && (jstr)[ii] == '\\\\' && (jstr)[ii+1] == 'u') { \\\n cp = decode_codepoint(cp, decode_hex((jstr)+ii+2)); \\\n ii += 6; \\\n } \\\n } \\\n uint8_t _bytes[4]; \\\n int _n = encode_codepoint(_bytes, cp); \\\n for (int _j = 0; _j < _n; _j++) { \\\n ch = _bytes[_j]; \\\n if (1) f \\\n } \\\n ii--; \\\n continue; \\\n default: \\\n continue; 
\\\n }; \\\n if (1) f \\\n } \\\n}\n\nstatic void arg_append_unescape_str(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n uint8_t *str2 = xmalloc(slen+1);\n for_each_utf8((uint8_t*)str, slen, {\n str2[str2len++] = ch;\n });\n args_append(args, (char*)str2, str2len, false);\n xfree(str2);\n}\n\n// Very simple map to stores all params numbers.\nstruct pmap {\n int count;\n int nbuckets;\n uint16_t *buckets;\n uint16_t def[8];\n};\n\nstatic void pmap_init(struct pmap *map) {\n memset(map, 0, sizeof(struct pmap));\n map->nbuckets = sizeof(map->def)/sizeof(uint16_t);\n map->buckets = map->def;\n}\n\nstatic void pmap_free(struct pmap *map) {\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n}\n\nstatic void pmap_insert0(uint16_t *buckets, int nbuckets, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%nbuckets;\n while (1) {\n if (buckets[i] == 0) {\n buckets[i] = param;\n return;\n }\n i = (i+1)%nbuckets;\n }\n}\n\nstatic void pmap_grow(struct pmap *map) {\n int nbuckets2 = map->nbuckets*2;\n uint16_t *buckets2 = xmalloc(nbuckets2*sizeof(uint16_t));\n memset(buckets2, 0, nbuckets2*sizeof(uint16_t));\n for (int i = 0; i < map->nbuckets; i++) {\n if (map->buckets[i]) {\n pmap_insert0(buckets2, nbuckets2, map->buckets[i]);\n }\n }\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n map->buckets = buckets2;\n map->nbuckets = nbuckets2;\n}\n\nstatic void pmap_insert(struct pmap *map, uint16_t param) {\n assert(param != 0);\n if (map->count == (map->nbuckets>>1)+(map->nbuckets>>2)) {\n pmap_grow(map);\n }\n pmap_insert0(map->buckets, map->nbuckets, param);\n map->count++;\n}\n\nstatic bool pmap_exists(struct pmap *map, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%map->nbuckets;\n while (1) {\n if (map->buckets[i] == 0) {\n return false;\n }\n if (map->buckets[i] == param) {\n return true;\n }\n i = (i+1)%map->nbuckets;\n }\n}\n\nstatic bool parse_query_args(const char *query, struct args 
*args, \n int *nparams, struct buf *argtypes)\n{\n dprintf(\"parse_query: [%s]\\n\", query);\n struct pmap pmap;\n pmap_init(&pmap);\n\n // loop through each keyword\n while (isspace(*query)) {\n query++;\n }\n bool ok = false;\n bool esc = false;\n const char *str;\n const char *p = query;\n bool join = false;\n while (*p) {\n switch (*p) {\n case ';':\n goto break_while;\n case '\\\"':\n // identifier\n parse_errorf(\"idenifiers not allowed\");\n goto done;\n case '\\'':\n // simple string\n p++;\n str = p;\n esc = false;\n while (*p) {\n if (*p == '\\'') {\n if (*(p+1) == '\\'') {\n esc = true;\n p += 2;\n continue;\n }\n break;\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_simplestr(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n case '$':\n // dollar-quote or possible param\n if (*(p+1) >= '0' && *(p+1) <= '9') {\n char *e = 0;\n long param = strtol(p+1, &e, 10);\n if (param == 0 || param > 0xFFFF) {\n parse_errorf(\"there is no parameter $%ld\", param);\n goto done;\n }\n pmap_insert(&pmap, param);\n args_append(args, p, e-p, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'P'+join);\n join = *e && !isspace(*e);\n }\n p = e;\n continue;\n }\n // dollar-quote strings not\n parse_errorf(\"dollar-quote strings not allowed\");\n goto done;\n case 'E': case 'e':\n if (*(p+1) == '\\'') {\n // escaped string\n p += 2;\n str = p;\n while (*p) {\n if (*p == '\\\\') {\n esc = true;\n } else if (*p == '\\'') {\n size_t x = 0;\n while (*(p-x-1) == '\\\\') {\n x++;\n }\n if ((x%2)==0) {\n break;\n }\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_str(args, str, 
slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n }\n // fallthrough\n default:\n if (isspace(*p)) {\n p++;\n continue;\n }\n // keyword\n const char *keyword = p;\n while (*p && !isspace(*p)) {\n if (*p == ';' || *p == '\\'' || *p == '\\\"' || *p == '$') {\n break;\n }\n p++;\n }\n size_t keywordlen = p-keyword;\n args_append(args, keyword, keywordlen, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *p && !isspace(*p);\n }\n while (isspace(*p)) {\n p++;\n }\n continue;\n }\n p++;\n }\nbreak_while:\n while (*p) {\n if (*p != ';') {\n parse_errorf(\"unexpected characters at end of query\");\n goto done;\n }\n p++;\n }\n ok = true;\ndone:\n if (ok) {\n // check params\n for (int i = 0; i < pmap.count; i++) {\n if (!pmap_exists(&pmap, i+1)) {\n parse_errorf(\"missing parameter $%d\", i+1);\n ok = false;\n break;\n }\n }\n }\n *nparams = pmap.count;\n pmap_free(&pmap);\n if (argtypes) {\n buf_append_byte(argtypes, '\\0');\n }\n return ok;\n}\n\nstatic bool parse_cache_query_args(const char *query, struct args *args,\n int *maxparam, struct buf *argtypes)\n{\n while (isspace(*query)) {\n query++;\n }\n if (!parse_query_args(query, args, maxparam, argtypes)) {\n return false;\n }\n#ifdef PGDEBUG\n args_print(args);\n#endif\n if (argtypes) {\n dprintf(\"argtypes: [%s]\\n\", argtypes->data);\n }\n return true;\n}\n\nstatic size_t parseQ(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Query\n dprintf(\">>> Query\\n\");\n parse_begin();\n const char *query = parse_cstr();\n parse_end();\n int nparams = 0;\n bool pok = parse_cache_query_args(query, args, &nparams, 0);\n if (!pok) {\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (nparams > 0) {\n parse_seterror(\"query cannot have parameters\");\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (args->len == 0) {\n pg->empty_query = 1;\n }\n return len;\n}\n\nstatic size_t 
parseP(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Parse\n dprintf(\"<<< Parse\\n\");\n // print_packet(data, len);\n parse_begin();\n const char *stmt_name = parse_cstr();\n const char *query = parse_cstr();\n uint16_t num_param_types = parse_int16();\n // dprintf(\". Parse [%s] [%s] [%d]\\n\", stmt_name, query,\n // (int)num_param_types);\n for (uint16_t i = 0; i < num_param_types; i++) {\n int32_t param_type = parse_int32();\n (void)param_type;\n // dprintf(\". [%d]\\n\", param_type);\n }\n parse_end();\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n int nparams = 0;\n struct buf argtypes = { 0 };\n bool ok = parse_cache_query_args(query, args, &nparams, &argtypes);\n if (!ok) {\n pg->error = 1;\n args_clear(args);\n buf_clear(&argtypes);\n return len;\n }\n // copy over last statement\n struct pg_statement stmt = { 0 };\n strcpy(stmt.name, stmt_name);\n stmt.nparams = nparams;\n // copy over parsed args\n for (size_t i = 0; i < args->len; i++) {\n args_append(&stmt.args, args->bufs[i].data, args->bufs[i].len, false);\n }\n args_clear(args);\n stmt.argtypes = argtypes;\n statement_insert(pg, &stmt);\n pg->parse = 1;\n return len;\n}\n\nstatic size_t parseD(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Describe\n dprintf(\"<<< Describe\\n\");\n if (pg->describe) {\n // Already has a describe in a sequence\n pg->error = 1;\n parse_errorf(\"double describe not allowed\");\n return -1;\n }\n // print_packet(data, len);\n parse_begin();\n uint8_t type = parse_byte();\n const char *name = parse_cstr();\n parse_end();\n\n dprintf(\". 
Describe [%c] [%s]\\n\", type, name);\n if (type == 'P' || type == 'P'+1) {\n struct pg_portal portal;\n if (!portal_get(pg, name, &portal)) {\n parse_errorf(\"portal not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('T')\n // Int32 length\n // Int16 field_count\n // Field[] fields\n // all fields are unnamed text\n char field[] = { \n 0x00, // \"\\0\" (field name)\n 0x00, 0x00, 0x00, 0x00, // table_oid = 0\n 0x00, 0x00, // column_attr_no = 0\n 0x00, 0x00, 0x00, pg->oid, // type_oid = 25 (text)\n 0xFF, 0xFF, // type_size = -1\n 0xFF, 0xFF, 0xFF, 0xFF, // type_modifier = -1\n 0x00, 0x00, // format_code = 0 (text)\n };\n static_assert(sizeof(field) == 19, \"\");\n size_t size = 1+4+2+portal.params.len*sizeof(field);\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 'T';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, portal.params.len);\n p1 += 2;\n for (size_t i = 0; i < portal.params.len; i++) {\n memcpy(p1, field, sizeof(field));\n p1 += sizeof(field);\n }\n pg->desclen = size;\n return len;\n }\n\n if (type == 'S') {\n struct pg_statement stmt;\n if (!statement_get(pg, name, &stmt)) {\n parse_errorf(\"statement not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('t')\n // Int32 length\n // Int16 num_params\n // Int32[] param_type_oids\n size_t size = 1+4+2+stmt.nparams*4;\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 't';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, stmt.nparams);\n p1 += 2;\n for (int i = 0; i < stmt.nparams; i++) {\n write_i32(p1, pg->oid);\n p1 += 4;\n }\n pg->desclen = size;\n pg->describe = 1;\n return len;\n }\n parse_errorf(\"unsupported describe type '%c'\", type);\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseB(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n\n // Bind\n dprintf(\"<<< 
Bind\\n\");\n\n // print_packet(data, len);\n\n // X Byte1('B') # Bind message identifier\n // X Int32 length # Message length including self\n //\n // String portal_name # Destination portal (\"\" = unnamed)\n // String statement_name # Prepared statement name (from Parse)\n // Int16 num_format_codes # 0 = all text, 1 = one for all, or N\n // [Int16] format_codes # 0 = text, 1 = binary\n // Int16 num_parameters\n // [parameter values]\n // Int16 num_result_formats\n // [Int16] result_format_codes\n\n parse_begin();\n const char *portal_name = parse_cstr();\n const char *stmt_name = parse_cstr();\n int num_formats = parse_int16();\n for (int i = 0; i < num_formats; i++) {\n int format = parse_int16();\n if (format != 0 && format != 1) {\n parse_errorf(\"only text or binary format allowed\");\n pg->error = 1;\n return len;\n }\n }\n uint16_t num_params = parse_int16();\n args_clear(&pg->targs);\n for (int i = 0; i < num_params; i++) {\n int32_t len = parse_int32();\n if (len <= 0) {\n // Nulls are empty strings\n len = 0;\n }\n const char *b = parse_bytes(len);\n args_append(&pg->targs, b, len, false);\n }\n // ignore result formats\n uint16_t num_result_formats = parse_int16();\n for (int i = 0; i < num_result_formats; i++) {\n int result_format_codes = parse_int16();\n (void)result_format_codes;\n }\n parse_end();\n\n if (strlen(portal_name) >= PGNAMEDATALEN) {\n parse_seterror(\"portal name too large\");\n pg->error = 1;\n return len;\n }\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n struct pg_portal portal = { 0 };\n strcpy(portal.name, portal_name);\n strcpy(portal.stmt, stmt_name);\n memcpy(&portal.params, &pg->targs, sizeof(struct args));\n memset(&pg->targs, 0, sizeof(struct args));\n portal_insert(pg, &portal);\n pg->bind = 1;\n return len;\n}\n\nstatic size_t parseX(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Close\n 
dprintf(\"<<< Close\\n\");\n parse_begin();\n parse_end();\n pg->close = 1;\n return len;\n}\n\nstatic size_t parseE(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Execute\n dprintf(\"<<< Execute\\n\");\n parse_begin();\n const char *portal_name = parse_cstr();\n size_t max_rows = parse_int32();\n parse_end();\n struct pg_portal portal;\n if (!portal_get(pg, portal_name, &portal)) {\n parse_seterror(\"portal not found\");\n pg->error = 1;\n return len;\n }\n struct pg_statement stmt;\n if (!statement_get(pg, portal.stmt, &stmt)) {\n parse_seterror(\"statement not found\");\n pg->error = 1;\n return len;\n }\n if ((size_t)stmt.nparams != portal.params.len) {\n parse_seterror(\"portal params mismatch\");\n pg->error = 1;\n return len;\n }\n // ignore max_rows\n (void)max_rows;\n\n // \n args_clear(&pg->targs);\n for (size_t i = 0; i < stmt.args.len; i++) {\n const char *arg = stmt.args.bufs[i].data;\n size_t arglen = stmt.args.bufs[i].len;\n char atype = stmt.argtypes.data[i];\n dprintf(\"[%.*s] [%c]\\n\", (int)arglen, arg, atype);\n bool join = false;\n switch (atype) {\n case 'A'+1:\n atype = 'A';\n join = true;\n break;\n case 'P':\n join = false;\n break;\n case 'P'+1:\n atype = 'P';\n join = true;\n break;\n }\n if (atype == 'P') {\n if (arglen == 0 || arg[0] != '$') {\n goto internal_error;\n }\n uint64_t x;\n bool ok = parse_u64(arg+1, arglen-1, &x);\n if (!ok || x == 0 || x > 0xFFFF) {\n goto internal_error;\n }\n size_t paramidx = x-1;\n if (paramidx >= portal.params.len) {\n goto internal_error;\n }\n arg = portal.params.bufs[paramidx].data;\n arglen = portal.params.bufs[paramidx].len;\n }\n if (join) {\n assert(pg->targs.len > 0);\n buf_append(&pg->targs.bufs[pg->targs.len-1], arg, arglen);\n } else {\n args_append(&pg->targs, arg, arglen, false);\n }\n }\n\n struct args swapargs = *args;\n *args = pg->targs;\n pg->targs = swapargs;\n\n#ifdef PGDEBUG\n args_print(args);\n#endif\n\n pg->execute = 1;\n 
return len;\ninternal_error:\n parse_seterror(\"portal params internal error\");\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseS(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args;\n // Sync\n dprintf(\"<<< Sync\\n\");\n // print_packet(data, len);\n parse_begin();\n parse_end();\n pg->sync = 1;\n return len;\n}\n\nstatic size_t parsep(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // PasswordMessage\n parse_begin();\n const char *password = parse_cstr();\n parse_end();\n if (strcmp(password, auth) != 0) {\n parse_seterror(\n \"WRONGPASS invalid username-password pair or user is disabled.\");\n return -1;\n }\n pg->auth = 1;\n return len;\n}\n\nstatic ssize_t parse_message(const char *data, size_t len, struct args *args,\n struct pg *pg)\n{\n if (len < 5) {\n return 0;\n }\n int msgbyte = data[0];\n size_t msglen = read_i32(data+1);\n if (len < msglen+1) {\n return 0;\n }\n msglen -= 4;\n data += 5;\n ssize_t ret;\n switch (msgbyte) {\n case 'Q':\n ret = parseQ(data, msglen, args, pg);\n break;\n case 'P':\n ret = parseP(data, msglen, args, pg);\n break;\n case 'X':\n ret = parseX(data, msglen, args, pg);\n break;\n case 'E':\n ret = parseE(data, msglen, args, pg);\n break;\n case 'p': // lowercase\n ret = parsep(data, msglen, args, pg);\n break;\n case 'D':\n ret = parseD(data, msglen, args, pg);\n break;\n case 'B':\n ret = parseB(data, msglen, args, pg);\n break;\n case 'S':\n ret = parseS(data, msglen, args, pg);\n break;\n default:\n pg->error = 1;\n parse_errorf(\"unknown message '%c'\", msgbyte);\n ret = msglen;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+5;\n}\n\nstatic ssize_t parse_magic_ssl(const char *data, size_t len, struct pg *pg) {\n (void)data;\n // SSLRequest\n pg->ssl = 1;\n return len;\n}\n\nstatic ssize_t parse_magic_proto3(const char *data, size_t len, struct pg *pg) {\n // StartupMessage\n const char *p = (void*)data;\n const char *e = 
p+len;\n // Read parameters\n const char *user = \"\";\n const char *database = \"\";\n const char *application_name = \"\";\n const char *client_encoding = \"\";\n const char *name = 0;\n const char *s = (char*)p;\n while (p < e) {\n if (*p == '\\0') {\n if (s != p) {\n if (name) {\n if (strcmp(name, \"database\") == 0) {\n database = s;\n } else if (strcmp(name, \"application_name\") == 0) {\n application_name = s;\n } else if (strcmp(name, \"client_encoding\") == 0) {\n client_encoding = s;\n } else if (strcmp(name, \"user\") == 0) {\n user = s;\n }\n name = 0;\n } else {\n name = s;\n }\n }\n s = p+1;\n }\n p++;\n }\n // dprintf(\". database=%s, application_name=%s, client_encoding=%s, \"\n // \"user=%s\\n\", database, application_name, client_encoding, user);\n if (*client_encoding && strcmp(client_encoding, \"UTF8\") != 0) {\n printf(\"# Invalid Postgres client_encoding (%s)\\n\",\n client_encoding);\n return -1;\n }\n pg->user = xmalloc(strlen(user)+1);\n strcpy((char*)pg->user, user);\n pg->database = xmalloc(strlen(database)+1);\n strcpy((char*)pg->database, database);\n pg->application_name = xmalloc(strlen(application_name)+1);\n strcpy((char*)pg->application_name, application_name);\n pg->startup = 1;\n return p-data;\n}\n\nstatic ssize_t parse_magic_cancel(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n parse_errorf(\"cancel message unsupported\");\n return -1;\n}\n\nstatic ssize_t parse_magic(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n if (len < 4) {\n return 0;\n }\n size_t msglen = read_i32(data);\n if (msglen > 65536) {\n parse_errorf(\"message too large\");\n return -1;\n }\n if (len < msglen) {\n return 0;\n }\n if (msglen < 8) {\n parse_errorf(\"invalid message\");\n return -1;\n }\n // dprintf(\"parse_magic\\n\");\n uint32_t magic = read_i32(data+4);\n data += 8;\n msglen -= 8;\n ssize_t ret;\n switch (magic) {\n case 0x04D2162F: \n ret = parse_magic_ssl(data, msglen, 
pg);\n break;\n case 0x00030000: \n ret = parse_magic_proto3(data, msglen, pg);\n break;\n case 0xFFFF0000: \n ret = parse_magic_cancel(data, msglen, pg);\n break;\n default:\n parse_errorf(\"Protocol error: unknown magic number %08x\", magic);\n ret = -1;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+8;\n}\n\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pgptr)\n{\n (void)print_packet;\n // print_packet(data, len);\n struct pg *pg = *pgptr;\n if (!pg) {\n pg = pg_new();\n *pgptr = pg;\n }\n pg->error = 0;\n if (len == 0) {\n return 0;\n }\n if (data[0] == 0) {\n return parse_magic(data, len, pg);\n }\n return parse_message(data, len, args, pg);\n}\n\nvoid pg_write_auth(struct conn *conn, unsigned char code) {\n unsigned char bytes[] = { \n 'R', 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n}\n\nvoid pg_write_ready(struct conn *conn, unsigned char code) {\n if (!pg_execute(conn)) {\n unsigned char bytes[] = { \n 'Z', 0x0, 0x0, 0x0, 0x5, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n }\n}\n\nvoid pg_write_status(struct conn *conn, const char *key, const char *val) {\n size_t keylen = strlen(key);\n size_t vallen = strlen(val);\n int32_t size = 4+keylen+1+vallen+1;\n char *bytes = xmalloc(1+size);\n bytes[0] = 'S';\n write_i32(bytes+1, size);\n memcpy(bytes+1+4,key,keylen+1);\n memcpy(bytes+1+4+keylen+1,val,vallen+1);\n conn_write_raw(conn, bytes, 1+size);\n xfree(bytes);\n}\n\nvoid pg_write_row_desc(struct conn *conn, const char **fields, int nfields){\n size_t size = 1+4+2;\n for (int i = 0; i < nfields; i++) {\n size += strlen(fields[i])+1;\n size += 4+2+4+2+4+2;\n }\n int oid = conn_pg(conn)->oid;\n char *bytes = xmalloc(size);\n bytes[0] = 'T';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, nfields); // field_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < nfields; i++) {\n size_t fsize = 
strlen(fields[i]);\n memcpy(p, fields[i], fsize+1);\n p += fsize+1;\n write_i32(p, 0); // table_oid\n p += 4;\n write_i16(p, 0); // column_attr_number\n p += 2;\n write_i32(p, oid); // type_oid\n p += 4;\n write_i16(p, -1); // type_size\n p += 2;\n write_i32(p, -1); // type_modifier\n p += 4;\n write_i16(p, 1); // format_code\n p += 2;\n }\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_row_data(struct conn *conn, const char **cols, \n const size_t *collens, int ncols)\n{\n size_t size = 1+4+2;\n for (int i = 0; i < ncols; i++) {\n size += 4+collens[i];\n }\n char *bytes = xmalloc(size);\n bytes[0] = 'D';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, ncols); // column_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < ncols; i++) {\n write_i32(p, collens[i]); // column_length\n p += 4;\n#ifdef PGDEBUG\n printf(\" ROW >>>> len:%zu [\", collens[i]);\n binprint(cols[i], collens[i]);\n printf(\"]\\n\");\n#endif\n memcpy(p, cols[i], collens[i]); // column_data\n p += collens[i];\n }\n \n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_complete(struct conn *conn, const char *tag){\n size_t taglen = strlen(tag);\n size_t size = 1+4+taglen+1;\n char *bytes = xmalloc(size);\n bytes[0] = 'C';\n write_i32(bytes+1, size-1); // message_size\n memcpy(bytes+1+4, tag, taglen+1);\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_completef(struct conn *conn, const char *tag_format, ...){\n // initializing list pointer\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_complete(conn, tag);\n}\n\nvoid pg_write_simple_row_data_ready(struct conn *conn, const char *desc,\n const void *row, size_t len, const char *tag)\n{\n pg_write_row_desc(conn, (const char*[]){ desc }, 1);\n pg_write_row_data(conn, (const char*[]){ row }, (size_t[]){ len }, 1);\n pg_write_complete(conn, tag);\n pg_write_ready(conn, 
'I');\n}\n\nvoid pg_write_simple_row_str_ready(struct conn *conn, const char *desc,\n const char *row, const char *tag)\n{\n pg_write_simple_row_data_ready(conn, desc, row, strlen(row), tag);\n}\n\nvoid pg_write_simple_row_i64_ready(struct conn *conn, const char *desc,\n int64_t row, const char *tag)\n{\n char val[32];\n snprintf(val, sizeof(val), \"%\" PRIi64, row);\n pg_write_simple_row_str_ready(conn, desc, val, tag);\n}\n\nvoid pg_write_simple_row_str_readyf(struct conn *conn, const char *desc,\n const char *row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_str_ready(conn, desc, row, tag);\n}\n\nvoid pg_write_simple_row_i64_readyf(struct conn *conn, const char *desc,\n int64_t row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_i64_ready(conn, desc, row, tag);\n}\n\nstatic void write_auth_ok(struct conn *conn, struct pg *pg) {\n // dprintf(\">> AuthOK\\n\");\n pg_write_auth(conn, 0); // AuthOK;\n // startup message received, respond\n pg_write_status(conn, \"client_encoding\", \"UTF8\");\n pg_write_status(conn, \"server_encoding\", \"UTF8\");\n char status[128];\n snprintf(status, sizeof(status), \"%s (Pogocache)\", version);\n pg_write_status(conn, \"server_version\", status);\n pg_write_ready(conn, 'I'); // Idle;\n pg->ready = 1;\n}\n\n// Respond to various the connection states.\n// Returns true if the all responses complete or false if there was an\n// error.\nbool pg_respond(struct conn *conn, struct pg *pg) {\n if (pg->error) {\n conn_write_error(conn, parse_lasterror());\n return true;\n }\n if (pg->empty_query) {\n dprintf(\"====== pg_respond(pg->empty_query) =====\\n\");\n conn_write_raw(conn, \"I\\0\\0\\0\\4\", 5);\n conn_write_raw(conn, \"Z\\0\\0\\0\\5I\", 6);\n pg->empty_query = 0;\n return 
true;\n }\n if (pg->parse) {\n dprintf(\"====== pg_respond(pg->parse) =====\\n\");\n conn_write_raw(conn, \"1\\0\\0\\0\\4\", 5);\n pg->parse = 0;\n return true;\n }\n if (pg->bind) {\n dprintf(\"====== pg_respond(pg->bind) =====\\n\");\n conn_write_raw(conn, \"2\\0\\0\\0\\4\", 5);\n pg->bind = 0;\n return true;\n }\n if (pg->describe) {\n dprintf(\"====== pg_respond(pg->describe) =====\\n\");\n assert(pg->desc);\n conn_write_raw(conn, pg->desc, pg->desclen);\n xfree(pg->desc);\n pg->desc = 0;\n pg->desclen = 0;\n pg->describe = 0;\n return true;\n }\n if (pg->sync) {\n dprintf(\"====== pg_respond(pg->sync) =====\\n\");\n pg->execute = 0;\n pg_write_ready(conn, 'I');\n pg->sync = 0;\n return true;\n }\n if (pg->close) {\n dprintf(\"====== pg_respond(pg->close) =====\\n\");\n pg->close = 0;\n return false;\n }\n if (pg->ssl == 1) {\n if (!conn_istls(conn)) {\n conn_write_raw_cstr(conn, \"N\");\n } else {\n conn_write_raw_cstr(conn, \"Y\");\n }\n pg->ssl = 0;\n return true;\n }\n if (pg->auth == 1) {\n if (pg->startup == 0) {\n return false;\n }\n conn_setauth(conn, true);\n write_auth_ok(conn, pg);\n pg->auth = 0;\n return true;\n }\n if (pg->startup == 1) {\n if (auth && *auth) {\n pg_write_auth(conn, 3); // AuthenticationCleartextPassword;\n } else {\n write_auth_ok(conn, pg);\n pg->startup = 0;\n }\n return true;\n }\n return true;\n}\n\nvoid pg_write_error(struct conn *conn, const char *msg) {\n size_t msglen = strlen(msg);\n size_t size = 1+4;\n size += 1+5+1; // 'S' \"ERROR\" \\0\n size += 1+5+1; // 'V' \"ERROR\" \\0\n size += 1+5+1; // 'C' \"23505\" \\0\n size += 1+msglen+1; // 'M' msg \\0\n size += 1; // null-terminator\n char *bytes = xmalloc(size);\n bytes[0] = 'E';\n write_i32(bytes+1, size-1);\n char *p = bytes+1+4;\n memcpy(p, \"SERROR\", 7);\n p += 7;\n memcpy(p, \"VERROR\", 7);\n p += 7;\n memcpy(p, \"C23505\", 7);\n p += 7;\n p[0] = 'M';\n p++;\n memcpy(p, msg, msglen+1);\n p += msglen+1;\n p[0] = '\\0';\n conn_write_raw(conn, bytes, size);\n 
xfree(bytes);\n}\n\n// return true if the command need further execution, of false if this\n// operation handled it already\nbool pg_precommand(struct conn *conn, struct args *args, struct pg *pg) {\n#ifdef PGDEBUG\n printf(\"precommand: \");\n args_print(args);\n#endif\n if (args->len > 0 && args->bufs[0].len > 0) {\n char c = tolower(args->bufs[0].data[0]);\n if (c == 'b' || c == 'r' || c == 'c') {\n // silently ignore transaction commands.\n if (c == 'b' && argeq(args, 0, \"begin\")) {\n pg_write_completef(conn, \"BEGIN\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"rollback\")) {\n pg_write_completef(conn, \"ROLLBACK\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"commit\")) {\n pg_write_completef(conn, \"COMMIT\");\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n if (c == ':' && args->bufs[0].len > 1 && args->bufs[0].data[1] == ':') {\n if (argeq(args, 0, \"::bytea\") || argeq(args, 0, \"::bytes\")) {\n pg->oid = BYTEAOID;\n } else if (argeq(args, 0, \"::text\")) {\n pg->oid = TEXTOID;\n } else {\n char err[128];\n snprintf(err, sizeof(err), \"unknown type '%.*s'\", \n (int)(args->bufs[0].len-2), args->bufs[0].data+2);\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n return false;\n }\n args_remove_first(args);\n if (args->len == 0) {\n if (pg->oid == BYTEAOID) {\n pg_write_completef(conn, \"BYTEA\");\n } else {\n pg_write_completef(conn, \"TEXT\");\n }\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n }\n return true;\n}\n"], ["/pogocache/src/save.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit save.c provides an interface for saving and loading Pogocache\n// data files.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"pogocache.h\"\n#include \"buf.h\"\n#include \"util.h\"\n#include \"lz4.h\"\n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#define BLOCKSIZE 1048576\n#define COMPRESS\n\nextern struct pogocache *cache;\nextern const int verb;\n\nstruct savectx {\n pthread_t th; // work thread\n int index; // thread index\n pthread_mutex_t *lock; // write lock\n int fd; // work file descriptor\n int start; // current shard\n int count; // number of shards to process\n struct buf buf; // block buffer\n bool ok; // final ok\n int errnum; // final errno status\n struct buf dst; // compressed buffer space\n size_t nentries; // number of entried in block buffer\n};\n\nstatic int flush(struct savectx *ctx) {\n if (ctx->nentries == 0) {\n ctx->buf.len = 0;\n return 0;\n }\n // Make sure that there's enough space in the dst buffer to store the\n // header (16 bytes) and the compressed data.\n size_t bounds = LZ4_compressBound(ctx->buf.len);\n buf_ensure(&ctx->dst, 16+bounds);\n // Compress the block\n uint32_t len = LZ4_compress_default((char*)ctx->buf.data, \n (char*)ctx->dst.data+16, ctx->buf.len, bounds);\n // The block is now compressed.\n // Genreate a checksum of the compressed data.\n uint32_t crc = crc32(ctx->dst.data+16, len);\n // Write the 16 byte header\n // (0-3) 'POGO' tag\n memcpy(ctx->dst.data, \"POGO\", 4);\n // (4-7) Checksum\n write_u32(ctx->dst.data+4, crc);\n // (8-11) Len of decompressed data \n write_u32(ctx->dst.data+8, ctx->buf.len);\n // 
(12-15) Len of compressed data \n write_u32(ctx->dst.data+12, len);\n // The rest of the dst buffer contains the compressed bytes\n uint8_t *p = (uint8_t*)ctx->dst.data;\n uint8_t *end = p + len+16;\n bool ok = true;\n pthread_mutex_lock(ctx->lock);\n while (p < end) {\n ssize_t n = write(ctx->fd, p, end-p);\n if (n < 0) {\n ok = false;\n break;\n }\n p += n;\n }\n pthread_mutex_unlock(ctx->lock);\n ctx->buf.len = 0;\n ctx->nentries = 0;\n return ok ? 0 : -1;\n};\n\nstatic int save_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard;\n struct savectx *ctx = udata;\n buf_append_byte(&ctx->buf, 0); // entry type. zero=k/v string pair;\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n buf_append_uvarint(&ctx->buf, valuelen);\n buf_append(&ctx->buf, value, valuelen);\n if (expires > 0) {\n int64_t ttl = expires-time;\n assert(ttl > 0);\n buf_append_uvarint(&ctx->buf, ttl);\n } else {\n buf_append_uvarint(&ctx->buf, 0);\n }\n buf_append_uvarint(&ctx->buf, flags);\n buf_append_uvarint(&ctx->buf, cas);\n ctx->nentries++;\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void *thsave(void *arg) {\n struct savectx *ctx = arg;\n for (int i = 0; i < ctx->count; i++) {\n int shardidx = ctx->start+i;\n struct pogocache_iter_opts opts = {\n .oneshard = true,\n .oneshardidx = shardidx,\n .time = sys_now(),\n .entry = save_entry,\n .udata = ctx,\n };\n // write the unix timestamp before entries\n buf_append_uvarint(&ctx->buf, sys_unixnow());\n int status = pogocache_iter(cache, &opts);\n if (status == POGOCACHE_CANCELED) {\n goto done;\n }\n if (flush(ctx) == -1) {\n goto done;\n }\n }\n ctx->ok = true;\ndone:\n buf_clear(&ctx->buf);\n buf_clear(&ctx->dst);\n ctx->errnum = errno;\n return 0;\n}\n\nint save(const char *path, bool fast) {\n uint64_t seed = sys_seed();\n size_t psize = strlen(path)+32;\n char *workpath = 
xmalloc(psize);\n snprintf(workpath, psize, \"%s.%08x.pogocache.work\", path, \n (int)(seed%INT_MAX));\n if (verb > 1) {\n printf(\". Saving to work file %s\\n\", workpath);\n }\n int fd = open(workpath, O_RDWR|O_CREAT, S_IRUSR|S_IRGRP|S_IROTH);\n if (fd == -1) {\n return -1;\n }\n int nshards = pogocache_nshards(cache);\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n if (!fast) {\n nprocs = 1;\n }\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n struct savectx *ctxs = xmalloc(nprocs*sizeof(struct savectx));\n memset(ctxs, 0, nprocs*sizeof(struct savectx));\n bool ok = false;\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n ctx->index = i;\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->fd = fd;\n ctx->lock = &lock;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (nprocs > 1) {\n if (pthread_create(&ctx->th, 0, thsave, ctx) == -1) {\n ctx->th = 0;\n }\n }\n start += ctx->count;\n }\n // execute operations on failed threads (or fast=false)\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thsave(ctx);\n }\n }\n // wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n // check for any failures\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (!ctx->ok) {\n errno = ctx->errnum;\n goto done;\n }\n }\n // Move file work file to final path\n if (rename(workpath, path) == -1) {\n goto done;\n }\n ok = true;\ndone:\n close(fd);\n unlink(workpath);\n xfree(workpath);\n xfree(ctxs);\n return ok ? 
0 : -1;\n}\n\n// compressed block\nstruct cblock {\n struct buf cdata; // compressed data\n size_t dlen; // decompressed size\n};\n\nstruct loadctx {\n pthread_t th;\n\n // shared context\n pthread_mutex_t *lock;\n pthread_cond_t *cond;\n bool *donereading; // shared done flag\n int *nblocks; // number of blocks in queue\n struct cblock *blocks; // the block queue\n bool *failure; // a thread will set this upon error\n\n // thread status\n atomic_bool ok;\n int errnum;\n size_t ninserted;\n size_t nexpired;\n};\n\nstatic bool load_block(struct cblock *block, struct loadctx *ctx) {\n (void)ctx;\n bool ok = false;\n\n int64_t now = sys_now();\n int64_t unixnow = sys_unixnow();\n\n // decompress block\n char *ddata = xmalloc(block->dlen);\n int ret = LZ4_decompress_safe(block->cdata.data, ddata, block->cdata.len, \n block->dlen);\n if (ret < 0 || (size_t)ret != block->dlen) {\n printf(\". bad compressed block\\n\");\n goto done;\n }\n buf_clear(&block->cdata);\n uint8_t *p = (void*)ddata;\n uint8_t *e = p + block->dlen;\n\n int n;\n uint64_t x;\n // read unix time\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n printf(\". bad unix time\\n\");\n goto done;\n }\n p += n;\n\n int64_t unixtime = x;\n // printf(\". unixtime=%lld\\n\", unixtime);\n\n // Read each entry from decompressed data\n while (e > p) {\n /////////////////////\n // kind\n uint8_t kind = *(p++);\n \n if (kind != 0) {\n // only k/v strings allowed at this time.\n printf(\">> %d\\n\", kind);\n printf(\". 
unknown kind\\n\");\n goto done;\n }\n /////////////////////\n // key\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t keylen = x;\n if ((size_t)(e-p) < keylen) {\n goto done;\n }\n const uint8_t *key = p;\n p += keylen;\n /////////////////////\n // val\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t vallen = x;\n if ((size_t)(e-p) < vallen) {\n goto done;\n }\n const uint8_t *val = p;\n p += vallen;\n /////////////////////\n // ttl\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n goto done;\n }\n int64_t ttl = x;\n p += n;\n /////////////////////\n // flags\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > UINT32_MAX) {\n goto done;\n }\n uint32_t flags = x;\n p += n;\n /////////////////////\n // cas\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0) {\n goto done;\n }\n uint64_t cas = x;\n p += n;\n if (ttl > 0) {\n int64_t unixexpires = int64_add_clamp(unixtime, ttl);\n if (unixexpires < unixnow) {\n // already expired, skip this entry\n ctx->nexpired++;\n continue;\n }\n ttl = unixexpires-unixnow;\n }\n struct pogocache_store_opts opts = {\n .flags = flags,\n .time = now,\n .ttl = ttl,\n .cas = cas,\n };\n // printf(\"[%.*s]=[%.*s]\\n\", (int)keylen, key, (int)vallen, val);\n int ret = pogocache_store(cache, key, keylen, val, vallen, &opts);\n (void)ret;\n assert(ret == POGOCACHE_INSERTED || ret == POGOCACHE_REPLACED);\n ctx->ninserted++;\n }\n ok = true;\ndone:\n buf_clear(&block->cdata);\n xfree(ddata);\n if (!ok) {\n printf(\". 
bad block\\n\");\n }\n return ok;\n}\n\nstatic void *thload(void *arg) {\n struct loadctx *ctx = arg;\n pthread_mutex_lock(ctx->lock);\n while (1) {\n if (*ctx->failure) {\n break;\n }\n if (*ctx->nblocks > 0) {\n // Take a block for processing\n struct cblock block = ctx->blocks[(*ctx->nblocks)-1];\n (*ctx->nblocks)--;\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n ctx->ok = load_block(&block, ctx);\n pthread_mutex_lock(ctx->lock);\n if (!ctx->ok) {\n *ctx->failure = true;\n break;\n }\n // next block\n continue;\n }\n if (*ctx->donereading) {\n break;\n }\n pthread_cond_wait(ctx->cond, ctx->lock);\n }\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n if (!ctx->ok) {\n ctx->errnum = errno;\n }\n return 0;\n}\n\n// load data into cache from path\nint load(const char *path, bool fast, struct load_stats *stats) {\n // Use a single stream reader. Handing off blocks to threads.\n struct load_stats sstats;\n if (!stats) {\n stats = &sstats;\n }\n memset(stats, 0, sizeof(struct load_stats));\n\n int fd = open(path, O_RDONLY);\n if (fd == -1) {\n return -1;\n }\n\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n bool donereading = false;\n bool failure = false;\n\n int nprocs = fast ? 
sys_nprocs() : 1;\n struct loadctx *ctxs = xmalloc(nprocs*sizeof(struct loadctx));\n memset(ctxs, 0, nprocs*sizeof(struct loadctx));\n int nblocks = 0;\n struct cblock *blocks = xmalloc(sizeof(struct cblock)*nprocs);\n memset(blocks, 0, sizeof(struct cblock)*nprocs);\n int therrnum = 0;\n bool ok = true;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n ctx->lock = &lock;\n ctx->cond = &cond;\n ctx->donereading = &donereading;\n ctx->nblocks = &nblocks;\n ctx->failure = &failure;\n ctx->blocks = blocks;\n atomic_init(&ctx->ok, true);\n if (pthread_create(&ctx->th, 0, thload, ctx) == -1) {\n ctx->th = 0;\n ok = false;\n if (therrnum == 0) {\n therrnum = errno;\n }\n }\n }\n if (!ok) {\n // there was an error creating a thread. \n // At this point there may be some orphaned threads waiting on \n // a condition variable. \n goto shutdown_threads;\n }\n\n // Read the blocks from file, one at a time, handing putting blocks into\n // the 'blocks' queue. The running threads will pick these up and \n // process them in no specific order.\n struct buf cdata = { 0 };\n bool shortread = false;\n while (ok) {\n uint8_t head[16];\n ssize_t size = read(fd, head, 16);\n if (size <= 0) {\n if (size == -1) {\n ok = false;\n }\n break;\n }\n if (size < 16) {\n printf(\". bad head size\\n\");\n ok = false;\n break;\n }\n if (memcmp(head, \"POGO\", 4) != 0) {\n printf(\". missing 'POGO'\\n\");\n ok = false;\n break;\n }\n uint32_t crc;\n memcpy(&crc, head+4, 4);\n size_t dlen = read_u32(head+8);\n size_t clen = read_u32(head+12);\n buf_ensure(&cdata, clen);\n bool okread = true;\n size_t total = 0;\n while (total < clen) {\n ssize_t rlen = read(fd, cdata.data+total, clen-total);\n if (rlen <= 0) {\n shortread = true;\n okread = false;\n break;\n }\n total += rlen;\n }\n if (!okread) {\n if (shortread) {\n printf(\". 
shortread\\n\");\n }\n ok = false;\n break;\n }\n cdata.len = clen;\n stats->csize += clen;\n stats->dsize += dlen;\n uint32_t crc2 = crc32(cdata.data, clen);\n if (crc2 != crc) {\n printf(\". bad crc\\n\");\n ok = false;\n goto bdone;\n }\n // We have a good block. Push it into the queue\n pthread_mutex_lock(&lock);\n while (1) {\n if (failure) {\n // A major error occured, stop reading now\n ok = false;\n break;\n }\n if (nblocks == nprocs) {\n // Queue is currently filled up.\n // Wait and try again.\n pthread_cond_wait(&cond, &lock);\n continue;\n }\n // Add block to queue\n blocks[nblocks++] = (struct cblock){ \n .cdata = cdata,\n .dlen = dlen,\n };\n memset(&cdata, 0, sizeof(struct buf));\n pthread_cond_broadcast(&cond);\n break;\n }\n pthread_mutex_unlock(&lock);\n }\nbdone:\n buf_clear(&cdata);\n\n\nshutdown_threads:\n // Stop all threads\n pthread_mutex_lock(&lock);\n donereading = true;\n pthread_mutex_unlock(&lock);\n pthread_cond_broadcast(&cond);\n\n // Wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n stats->nexpired += ctx->nexpired;\n stats->ninserted += ctx->ninserted;\n }\n }\n\n // Get the current error, if any\n errno = 0;\n ok = ok && !failure;\n if (!ok) {\n errno = therrnum;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n if (!ctx->ok) {\n errno = ctx->errnum;\n break;\n }\n }\n }\n }\n\n // Free all resources.\n for (int i = 0; i < nblocks; i++) {\n buf_clear(&blocks[i].cdata);\n }\n xfree(blocks);\n xfree(ctxs);\n close(fd);\n return ok ? 
0 : -1;\n}\n\n// removes all work files and checks that the current directory is valid.\nbool cleanwork(const char *persist) {\n if (*persist == '\\0') {\n return false;\n }\n bool ok = false;\n char *path = xmalloc(strlen(persist)+1);\n strcpy(path, persist);\n char *dirpath = dirname(path);\n DIR *dir = opendir(dirpath);\n if (!dir) {\n perror(\"# opendir\");\n goto done;\n }\n struct dirent *entry;\n while ((entry = readdir(dir))) {\n if (entry->d_type != DT_REG) {\n continue;\n }\n const char *ext = \".pogocache.work\";\n if (strlen(entry->d_name) < strlen(ext) ||\n strcmp(entry->d_name+strlen(entry->d_name)-strlen(ext), ext) != 0)\n {\n continue;\n }\n size_t filepathcap = strlen(dirpath)+1+strlen(entry->d_name)+1;\n char *filepath = xmalloc(filepathcap);\n snprintf(filepath, filepathcap, \"%s/%s\", dirpath, entry->d_name);\n if (unlink(filepath) == 0) {\n printf(\"# deleted work file %s\\n\", filepath);\n } else {\n perror(\"# unlink\");\n }\n xfree(filepath);\n }\n ok = true;\ndone:\n if (dir) {\n closedir(dir);\n }\n xfree(path);\n return ok;\n}\n"], ["/pogocache/src/cmds.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit cmd.c handles all incoming client commands.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n#include \"pogocache.h\"\n#include \"stats.h\"\n\n// from main.c\nextern const uint64_t seed;\nextern const char *path;\nextern const int verb;\nextern const char *auth;\nextern const bool useauth;\nextern const char *persist;\nextern const int nthreads;\nextern const char *version;\nextern const char *githash;\nextern atomic_int_fast64_t flush_delay;\nextern atomic_bool sweep;\nextern atomic_bool lowmem;\nextern const int nshards;\nextern const int narenas;\nextern const int64_t procstart;\nextern const int maxconns;\n\nextern struct pogocache *cache;\n\nstruct set_entry_context {\n bool written;\n struct conn *conn;\n const char *cmdname;\n};\n\nstatic bool set_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)val, (void)vallen,\n (void)expires, (void)flags, (void)cas;\n struct set_entry_context *ctx = udata;\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n pg_write_row_desc(ctx->conn, (const char*[]){ \"value\" }, 1);\n pg_write_row_data(ctx->conn, (const char*[]){ val }, \n (size_t[]){ vallen }, 1);\n pg_write_completef(ctx->conn, \"%s 1\", ctx->cmdname);\n pg_write_ready(ctx->conn, 'I');\n } else {\n conn_write_bulk(ctx->conn, val, vallen);\n }\n ctx->written = true;\n 
return true;\n}\n\nstatic void execSET(struct conn *conn, const char *cmdname, \n int64_t now, const char *key,\n size_t keylen, const char *val, size_t vallen, int64_t expires, bool nx,\n bool xx, bool get, bool keepttl, uint32_t flags, uint64_t cas, bool withcas)\n{\n stat_cmd_set_incr(conn);\n struct set_entry_context ctx = { .conn = conn, .cmdname = cmdname };\n struct pogocache_store_opts opts = {\n .time = now,\n .expires = expires,\n .cas = cas,\n .flags = flags,\n .keepttl = keepttl,\n .casop = withcas,\n .nx = nx,\n .xx = xx,\n .lowmem = atomic_load_explicit(&lowmem, __ATOMIC_ACQUIRE),\n .entry = get?set_entry:0,\n .udata = get?&ctx:0,\n };\n int status = pogocache_store(cache, key, keylen, val, vallen, &opts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n return;\n }\n if (get) {\n if (!ctx.written) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n pg_write_completef(conn, \"%s 0\", cmdname);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_null(conn);\n }\n }\n return;\n }\n bool stored = status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED;\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (!stored) {\n if (status == POGOCACHE_FOUND) {\n conn_write_raw(conn, \"EXISTS\\r\\n\", 8);\n } else {\n conn_write_raw(conn, \"NOT_FOUND\\r\\n\", 12);\n }\n } else {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n }\n break;\n case PROTO_HTTP:\n if (!stored) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Stored\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"%s %d\", cmdname, stored?1:0);\n pg_write_ready(conn, 'I');\n break;\n default:\n if (!stored) {\n conn_write_null(conn);\n } else {\n conn_write_string(conn, \"OK\");\n }\n break;\n }\n}\n\nstatic int64_t expiry_seconds_time(struct conn *conn, int64_t now, \n 
int64_t expiry)\n{\n if (conn_proto(conn) == PROTO_MEMCACHE && expiry > HOUR*24*30) {\n // Consider Unix time value rather than an offset from current time.\n int64_t unix_ = sys_unixnow();\n if (expiry > unix_) {\n expiry = expiry-sys_unixnow();\n } else {\n expiry = 0;\n }\n }\n return int64_add_clamp(now, expiry);\n}\n\n// SET key value [NX | XX] [GET] [EX seconds | PX milliseconds |\n// EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL] \n// [FLAGS flags] [CAS cas] \nstatic void cmdSET(struct conn *conn, struct args *args) {\n#ifdef CMDSETOK\n // For testing the theoretical top speed of a single SET command.\n // No data is stored.\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\n#endif\n // RESP command\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n const char *val = args->bufs[2].data;\n size_t vallen = args->bufs[2].len;\n int64_t expires = 0;\n int exkind = 0;\n bool nx = false;\n bool xx = false;\n bool get = false;\n bool keepttl = false;\n bool hasex = false;\n uint32_t flags = 0;\n uint64_t cas = 0;\n bool withcas = false;\n for (size_t i = 3; i < args->len; i++) {\n if (argeq(args, i, \"ex\")) {\n exkind = 1;\n goto parse_ex;\n } else if (argeq(args, i, \"px\")) {\n exkind = 2;\n goto parse_ex;\n } else if (argeq(args, i, \"exat\")) {\n exkind = 3;\n goto parse_ex;\n } else if (argeq(args, i, \"pxat\")) {\n exkind = 4;\n parse_ex:\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, \n &expires);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n if (expires <= 0) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n // memcache allows for negative expiration\n expires = expiry_seconds_time(conn, now, 0);\n goto 
skip_exkind;\n } else {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n }\n switch (exkind) {\n case 1:\n expires = int64_mul_clamp(expires, SECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 2:\n expires = int64_mul_clamp(expires, MILLISECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 3:\n expires = int64_mul_clamp(expires, SECOND);\n break;\n case 4:\n expires = int64_mul_clamp(expires, MILLISECOND);\n break;\n }\n skip_exkind:\n hasex = true;\n } else if (argeq(args, i, \"nx\")) {\n nx = true;\n } else if (argeq(args, i, \"xx\")) {\n xx = true;\n } else if (argeq(args, i, \"get\")) {\n get = true;\n } else if (argeq(args, i, \"keepttl\")) {\n keepttl = true;\n } else if (argeq(args, i, \"flags\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n uint64_t x;\n if (!argu64(args, i, &x)) {\n goto err_syntax;\n }\n flags = x&UINT32_MAX;\n } else if (argeq(args, i, \"cas\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n if (!argu64(args, i, &cas)) {\n goto err_syntax;\n }\n withcas = true;\n } else {\n goto err_syntax;\n }\n }\n assert(expires >= 0);\n if (keepttl && hasex > 0){\n goto err_syntax;\n }\n if (xx && nx > 0){\n goto err_syntax;\n }\n execSET(conn, \"SET\", now, key, keylen, val, vallen, expires, nx, xx, get,\n keepttl, flags, cas, withcas);\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n}\n\nstatic void cmdSETEX(struct conn *conn, struct args *args) {\n if (args->len != 4) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t ex = 0;\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool ok = parse_i64(args->bufs[2].data, args->bufs[2].len, &ex);\n if (!ok || ex <= 0) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n ex = int64_mul_clamp(ex, SECOND);\n ex = int64_add_clamp(sys_now(), ex);\n const char *val = args->bufs[3].data;\n size_t 
vallen = args->bufs[3].len;
    execSET(conn, "SETEX", now, key, keylen, val, vallen, ex, 0, 0, 0, 0, 0, 0,
        0);
}

// Context shared by GET/MGET while writing entries back to the client.
struct get_entry_context {
    struct conn *conn;
    bool cas;       // include the CAS value in the reply
    bool mget;      // reply in multi-key form (key included per row)
};

// Entry callback invoked by pogocache_load for GET-style commands.
// Writes the found entry to the client in the connection's wire protocol.
static void get_entry(int shard, int64_t time, const void *key, size_t keylen,
    const void *val, size_t vallen, int64_t expires, uint32_t flags,
    uint64_t cas, struct pogocache_update **update, void *udata)
{
    (void)key, (void)keylen, (void)cas;
    (void)shard, (void)time, (void)expires, (void)flags, (void)update;
    struct get_entry_context *ctx = udata;
    int x;
    uint8_t buf[24];
    size_t n;
    switch (conn_proto(ctx->conn)) {
    case PROTO_POSTGRES:;
        char casbuf[24];
        if (ctx->cas) {
            // x counts the optional trailing "cas" column
            x = 1;
            n = snprintf(casbuf, sizeof(casbuf), "%" PRIu64, cas);
        } else {
            x = 0;
            casbuf[0] = '\0';
            n = 0;
        }
        if (ctx->mget) {
            pg_write_row_data(ctx->conn, (const char*[]){ key, val, casbuf },
                (size_t[]){ keylen, vallen, n }, 2+x);
        } else {
            pg_write_row_data(ctx->conn, (const char*[]){ val, casbuf },
                (size_t[]){ vallen, n }, 1+x);
        }
        break;
    case PROTO_MEMCACHE:
        // memcache text response: VALUE <key> <flags> <bytes> [<cas>]\r\n<data>\r\n
        conn_write_raw(ctx->conn, "VALUE ", 6);
        conn_write_raw(ctx->conn, key, keylen);
        n = u64toa(flags, buf);
        conn_write_raw(ctx->conn, " ", 1);
        conn_write_raw(ctx->conn, buf, n);
        n = u64toa(vallen, buf);
        conn_write_raw(ctx->conn, " ", 1);
        conn_write_raw(ctx->conn, buf, n);
        if (ctx->cas) {
            n = u64toa(cas, buf);
            conn_write_raw(ctx->conn, " ", 1);
            conn_write_raw(ctx->conn, buf, n);
        }
        conn_write_raw(ctx->conn, "\r\n", 2);
        conn_write_raw(ctx->conn, val, vallen);
        conn_write_raw(ctx->conn, "\r\n", 2);
        break;
    case PROTO_HTTP:
        conn_write_http(ctx->conn, 200, "OK", val, vallen);
        break;
    default:
        // RESP: when CAS is requested reply is a two element array [cas, value]
        if (ctx->cas) {
            conn_write_array(ctx->conn, 2);
            conn_write_uint(ctx->conn, cas);
        }
        conn_write_bulk(ctx->conn, val, vallen);
    }
}

// GET key
static void cmdGET(struct conn *conn, struct args *args) {
    stat_cmd_get_incr(conn);
#ifdef CMDGETNIL
    // benchmark shortcut: always reply null (compile-time option)
    conn_write_null(conn);
    return;
#endif
#ifdef CMDSETOK
    // benchmark shortcut: always reply a fixed bulk string (compile-time option)
    conn_write_string(conn, "$1\r\nx\r\n");
    return;
#endif
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    const char *key = args->bufs[1].data;
    size_t keylen = args->bufs[1].len;
    struct get_entry_context ctx = {
        .conn = conn
    };
    struct pogocache_load_opts opts = {
        .time = now,
        .entry = get_entry,
        .udata = &ctx,
    };
    int proto = conn_proto(conn);
    if (proto == PROTO_POSTGRES) {
        // row description must precede any data rows
        pg_write_row_desc(conn, (const char*[]){ "value" }, 1);
    }
    int status = pogocache_load(cache, key, keylen, &opts);
    if (status == POGOCACHE_NOTFOUND) {
        stat_get_misses_incr(conn);
        if (proto == PROTO_HTTP) {
            conn_write_http(conn, 404, "Not Found", "Not Found\r\n" , -1);
        } else if (proto == PROTO_POSTGRES) {
            pg_write_complete(conn, "GET 0");
        } else {
            conn_write_null(conn);
        }
    } else {
        // the entry itself was already written by get_entry
        stat_get_hits_incr(conn);
        if (proto == PROTO_POSTGRES) {
            pg_write_complete(conn, "GET 1");
        }
    }
    if (proto == PROTO_POSTGRES) {
        pg_write_ready(conn, 'I');
    }
}

// MGET key [key...]
static void cmdMGET(struct conn *conn, struct args *args) {
    if (args->len < 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    struct get_entry_context ctx = {
        .conn = conn,
        .mget = true,
        // "mgets" (memcache) variant also returns the CAS value
        .cas = argeq(args, 0, "mgets"),
    };
    struct pogocache_load_opts opts = {
        .time = now,
        .entry = get_entry,
        .udata = &ctx,
    };
    int count = 0;
    int proto = conn_proto(conn);
    if (proto == PROTO_POSTGRES) {
        pg_write_row_desc(conn, (const char*[]){ "key", "value", "cas" },
            2+(ctx.cas?1:0));
    } else if (proto == PROTO_RESP) {
        // RESP replies with a fixed-size array: one slot per requested key
        conn_write_array(conn, args->len-1);
    }
    for (size_t i = 1; i < args->len; i++) {
        stat_cmd_get_incr(conn);
        const char *key = args->bufs[i].data;
        size_t keylen = args->bufs[i].len;
        int status = pogocache_load(cache, key, keylen, &opts);
        if (status == POGOCACHE_NOTFOUND) {
            stat_get_misses_incr(conn);
            if (proto == PROTO_RESP) {
                conn_write_null(conn);
            }
        } else {
            count++;
            stat_get_hits_incr(conn);
        }
    }
    if (proto == PROTO_POSTGRES) {
        pg_write_completef(conn, "MGET %d", count);
        pg_write_ready(conn, 'I');
    } else if (proto == PROTO_MEMCACHE) {
        conn_write_raw_cstr(conn, "END\r\n");
    }
}

// Accumulates matching keys for the KEYS command. Keys are stored in buf as
// (uvarint length, raw bytes) records; see buf_append_uvarint usage below.
struct keys_ctx {
    int64_t now;
    struct buf buf;
    size_t count;
    char *pattern;
    size_t plen;
};

static void keys_ctx_free(struct keys_ctx *ctx) {
    xfree(ctx->pattern);
    buf_clear(&ctx->buf);
    xfree(ctx);
}

// pattern matcher
// see https://github.com/tidwall/match.c
// Supports '*' (any run), '?' (any one char), and '\' escapes. Recursion is
// capped at depth 128 to bound stack usage; beyond that the match fails.
static bool match(const char *pat, size_t plen, const char *str, size_t slen,
    int depth)
{
    if (depth == 128) {
        return false;
    }
    while (plen > 0) {
        if (pat[0] == '\\') {
            // escaped character: next pattern byte is matched literally
            if (plen == 1) return false;
            pat++; plen--;
        } else if (pat[0] == '*') {
            if (plen == 1) return true;
            if (pat[1] == '*') {
                // collapse consecutive stars
                pat++; plen--;
                continue;
            }
            if (match(pat+1, plen-1, str, slen, depth+1)) return true;
            if (slen == 0) return false;
            str++; slen--;
            continue;
        }
        if (slen == 0) return false;
        if (pat[0] != '?' 
&& str[0] != pat[0]) return false;
        pat++; plen--;
        str++; slen--;
    }
    // matched only if both pattern and string are fully consumed
    return slen == 0 && plen == 0;
}

// Iterator callback for KEYS: records each key matching the pattern.
// A pattern of exactly "*" short-circuits the matcher.
static int keys_entry(int shard, int64_t time, const void *key, size_t keylen,
    const void *value, size_t valuelen, int64_t expires, uint32_t flags,
    uint64_t cas, void *udata)
{
    (void)shard, (void)time, (void)value, (void)valuelen, (void)expires,
        (void)flags, (void)cas;
    struct keys_ctx *ctx = udata;
    if ((ctx->plen == 1 && *ctx->pattern == '*') ||
        match(ctx->pattern, ctx->plen, key, keylen, 0))
    {
        // record as (uvarint length, raw key bytes)
        buf_append_uvarint(&ctx->buf, keylen);
        buf_append(&ctx->buf, key, keylen);
        ctx->count++;
    }
    return POGOCACHE_ITER_CONTINUE;
}

// Background phase of KEYS: scan the entire cache off the connection thread.
static void bgkeys_work(void *udata) {
    struct keys_ctx *ctx = udata;
    struct pogocache_iter_opts opts = {
        .time = ctx->now,
        .entry = keys_entry,
        .udata = ctx,
    };
    pogocache_iter(cache, &opts);
}

// Completion phase of KEYS: decode the collected (uvarint, key) records and
// write them to the client, then free the context.
static void bgkeys_done(struct conn *conn, void *udata) {
    struct keys_ctx *ctx = udata;
    int proto = conn_proto(conn);
    const char *p = ctx->buf.data;
    if (proto == PROTO_POSTGRES) {
        pg_write_row_desc(conn, (const char*[]){ "key" }, 1);
        for (size_t i = 0; i < ctx->count; i++) {
            uint64_t keylen;
            // NOTE(review): 10 presumably bounds the varint byte length — confirm
            p += varint_read_u64(p, 10, &keylen);
            const char *key = p;
            p += keylen;
            pg_write_row_data(conn, (const char*[]){ key },
                (size_t[]){ keylen }, 1);
        }
        pg_write_completef(conn, "KEYS %zu", ctx->count);
        pg_write_ready(conn, 'I');
    } else {
        conn_write_array(conn, ctx->count);
        for (size_t i = 0; i < ctx->count; i++) {
            uint64_t keylen;
            p += varint_read_u64(p, 10, &keylen);
            const char *key = p;
            p += keylen;
            conn_write_bulk(conn, key, keylen);
        }
    }
    keys_ctx_free(ctx);
}

// KEYS pattern
// Scans the whole cache in a background task; replies on completion.
static void cmdKEYS(struct conn *conn, struct args *args) {
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    const char *pattern = args->bufs[1].data;
    size_t plen = args->bufs[1].len;
    struct keys_ctx *ctx = xmalloc(sizeof(struct 
keys_ctx));
    memset(ctx, 0, sizeof(struct keys_ctx));
    // copy the pattern; the args buffer does not outlive this command
    ctx->pattern = xmalloc(plen+1);
    memcpy(ctx->pattern, pattern, plen);
    ctx->pattern[plen] = '\0';
    ctx->plen = plen;
    ctx->now = now;
    if (!conn_bgwork(conn, bgkeys_work, bgkeys_done, ctx)) {
        conn_write_error(conn, "ERR failed to do work");
        keys_ctx_free(ctx);
    }
}

// DEL key [key...]
// Deletes each named key; replies with a protocol-appropriate count/status.
static void cmdDEL(struct conn *conn, struct args *args) {
    if (args->len < 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    struct pogocache_delete_opts opts = {
        .time = now,
    };
    int64_t deleted = 0;
    for (size_t i = 1; i < args->len; i++) {
        const char *key = args->bufs[i].data;
        size_t keylen = args->bufs[i].len;
        int status = pogocache_delete(cache, key, keylen, &opts);
        if (status == POGOCACHE_DELETED) {
            stat_delete_hits_incr(conn);
            deleted++;
        } else {
            stat_delete_misses_incr(conn);
        }
    }
    switch (conn_proto(conn)) {
    case PROTO_MEMCACHE:
        if (deleted == 0) {
            conn_write_raw_cstr(conn, "NOT_FOUND\r\n");
        } else {
            conn_write_raw_cstr(conn, "DELETED\r\n");
        }
        break;
    case PROTO_HTTP:
        if (deleted == 0) {
            conn_write_http(conn, 404, "Not Found", "Not Found\r\n", -1);
        } else {
            conn_write_http(conn, 200, "OK", "Deleted\r\n", -1);
        }
        break;
    case PROTO_POSTGRES:
        pg_write_completef(conn, "DEL %" PRIi64, deleted);
        pg_write_ready(conn, 'I');
        break;
    default:
        conn_write_int(conn, deleted);
    }
}

// DBSIZE
// Replies with the number of live entries in the cache.
static void cmdDBSIZE(struct conn *conn, struct args *args) {
    if (args->len != 1) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    struct pogocache_count_opts opts = { .time = sys_now() };
    size_t count = pogocache_count(cache, &opts);
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_simple_row_i64_ready(conn, "count", count, "DBSIZE");
    } else {
        conn_write_int(conn, (int64_t)count);
    }
}

// Per-thread work description for the parallel flush in bgflushwork.
struct flushctx {
    pthread_t th;
    int64_t time;
    int start;   // first shard index for this thread
    int count;   // number of shards for this thread
};

static void 
*thflush(void *arg) {\n struct flushctx *ctx = arg;\n struct pogocache_clear_opts opts = { .time = sys_now(), .oneshard = true };\n for (int i = 0; i < ctx->count; i++) {\n opts.oneshardidx = i+ctx->start;\n pogocache_clear(cache, &opts);\n }\n return 0;\n}\n\nstatic void bgflushwork(void *udata) {\n (void)udata;\n atomic_store(&flush_delay, 0);\n int64_t now = sys_now();\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n struct flushctx *ctxs = xmalloc(nprocs*sizeof(struct flushctx));\n memset(ctxs, 0, nprocs*sizeof(struct flushctx));\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->time = now;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (pthread_create(&ctx->th, 0, thflush, ctx) == -1) {\n ctx->th = 0;\n }\n start += ctx->count;\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thflush(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n}\n\nstatic void bgflushdone(struct conn *conn, void *udata) {\n const char *cmdname = udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s SYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\n// FLUSHALL [SYNC|ASYNC] [DELAY ]\nstatic void cmdFLUSHALL(struct conn *conn, struct args *args) {\n const char *cmdname = \n args_eq(args, 0, \"flush\") ? \"FLUSH\" :\n args_eq(args, 0, \"flushdb\") ? 
\"FLUSHDB\" :\n \"FLUSHALL\";\n stat_cmd_flush_incr(conn);\n bool async = false;\n int64_t delay = 0;\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"async\")) {\n async = true;\n } else if (argeq(args, i, \"sync\")) {\n async = false;\n } else if (argeq(args, i, \"delay\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, &delay);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid exptime argument\");\n return;\n }\n if (delay > 0) {\n async = true;\n }\n } else {\n goto err_syntax;\n }\n }\n if (async) {\n if (delay < 0) {\n delay = 0;\n }\n delay = int64_mul_clamp(delay, SECOND);\n delay = int64_add_clamp(delay, sys_now());\n atomic_store(&flush_delay, delay);\n // ticker will check the delay and perform the flush\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s ASYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n // Flush database is slow. cmdname is static and thread safe\n conn_bgwork(conn, bgflushwork, bgflushdone, (void*)cmdname);\n return;\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct bgsaveloadctx {\n bool ok; // true = success, false = out of disk space\n bool fast; // use all the proccesing power, otherwise one thread.\n char *path; // path to file\n bool load; // otherwise save\n};\n\nstatic void bgsaveloadwork(void *udata) {\n struct bgsaveloadctx *ctx = udata;\n int64_t start = sys_now();\n int status;\n if (ctx->load) {\n status = load(ctx->path, ctx->fast, 0);\n } else {\n status = save(ctx->path, ctx->fast);\n }\n printf(\". 
%s finished %.3f secs\\n\", ctx->load?\"load\":\"save\", \n (sys_now()-start)/1e9);\n ctx->ok = status == 0;\n}\n\nstatic void bgsaveloaddone(struct conn *conn, void *udata) {\n struct bgsaveloadctx *ctx = udata;\n if (ctx->ok) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s OK\", ctx->load?\"LOAD\":\"SAVE\");\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (ctx->load) {\n conn_write_error(conn, \"load failed\");\n } else {\n conn_write_error(conn, \"save failed\");\n }\n }\n xfree(ctx->path);\n xfree(ctx);\n}\n\n// SAVE [TO ] [FAST]\n// LOAD [FROM ] [FAST]\nstatic void cmdSAVELOAD(struct conn *conn, struct args *args) {\n bool load = argeq(args, 0, \"load\");\n bool fast = false;\n const char *path = persist;\n size_t plen = strlen(persist);\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"fast\")) {\n fast = true;\n } else if ((load && argeq(args, i, \"from\")) || \n (!load && argeq(args, i, \"to\")))\n {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n path = args->bufs[i].data;\n plen = args->bufs[i].len;\n } else {\n goto err_syntax;\n }\n }\n if (plen == 0) {\n conn_write_error(conn, \"ERR path not provided\");\n return;\n }\n struct bgsaveloadctx *ctx = xmalloc(sizeof(struct bgsaveloadctx));\n memset(ctx, 0, sizeof(struct bgsaveloadctx));\n ctx->fast = fast;\n ctx->path = xmalloc(plen+1);\n ctx->load = load;\n memcpy(ctx->path, path, plen);\n ctx->path[plen] = '\\0';\n if (!conn_bgwork(conn, bgsaveloadwork, bgsaveloaddone, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx->path);\n xfree(ctx);\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct ttlctx {\n struct conn *conn;\n bool pttl;\n};\n\nstatic void ttl_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t 
vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)key, (void)keylen, (void)val, (void)vallen, (void)flags,\n (void)cas, (void)update;\n struct ttlctx *ctx = udata;\n int64_t ttl;\n if (expires > 0) {\n ttl = expires-time;\n if (ctx->pttl) {\n ttl /= MILLISECOND;\n } else {\n ttl /= SECOND;\n }\n } else {\n ttl = -1;\n }\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n char ttlstr[24];\n size_t n = i64toa(ttl, (uint8_t*)ttlstr);\n pg_write_row_data(ctx->conn, (const char*[]){ ttlstr }, \n (size_t[]){ n }, 1);\n } else {\n conn_write_int(ctx->conn, ttl);\n }\n}\n\nstatic void cmdTTL(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool pttl = argeq(args, 0, \"pttl\");\n struct ttlctx ctx = { .conn = conn, .pttl = pttl };\n struct pogocache_load_opts opts = {\n .time = sys_now(),\n .entry = ttl_entry,\n .notouch = true,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ pttl?\"pttl\":\"ttl\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_int(conn, -2);\n }\n } else {\n stat_get_hits_incr(conn);\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %d\", pttl?\"PTTL\":\"TTL\",\n status!=POGOCACHE_NOTFOUND);\n pg_write_ready(conn, 'I');\n }\n}\n\nstatic void expire_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)expires, (void)cas;\n struct pogocache_update *ctx = udata;\n ctx->flags = flags;\n ctx->value = 
value;\n ctx->valuelen = valuelen;\n *update = ctx;\n}\n\n// EXPIRE key seconds\n// returns 1 if success or 0 on failure. \nstatic void cmdEXPIRE(struct conn *conn, struct args *args) {\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n int64_t expires;\n if (!argi64(args, 2, &expires)) {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n return;\n }\n expires = int64_mul_clamp(expires, POGOCACHE_SECOND);\n expires = int64_add_clamp(now, expires);\n struct pogocache_update ctx = { .expires = expires };\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = expire_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(cache, key, keylen, &lopts);\n int ret = status == POGOCACHE_FOUND;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"EXPIRE %d\", ret);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, ret);\n }\n}\n\n// EXISTS key [key...]\n// Checks if one or more keys exist in the cache.\n// Return the number of keys that exist\nstatic void cmdEXISTS(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t count = 0;\n struct pogocache_load_opts opts = {\n .time = now,\n .notouch = true,\n };\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n count++;\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"exists\", count, \"EXISTS\");\n } else {\n conn_write_int(conn, count);\n }\n}\n\nstatic void sweep_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n size_t swept;\n size_t kept;\n struct pogocache_sweep_opts opts = {\n .time = start,\n };\n printf(\". 
sweep started\\n\");\n pogocache_sweep(cache, &swept, &kept, &opts);\n double elapsed = (sys_now()-start)/1e9;\n printf(\". sweep finished in %.2fs, (swept=%zu, kept=%zu) \\n\", elapsed, \n swept, kept);\n}\n\nstatic void sweep_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thsweep(void *arg) {\n (void)arg;\n sweep_work(0);\n return 0;\n}\n\n// SWEEP [ASYNC]\nstatic void cmdSWEEP(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thsweep, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, sweep_work, sweep_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstatic void purge_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n printf(\". purge started\\n\");\n xpurge();\n double elapsed = (sys_now()-start)/1e9;\n printf(\". 
purge finished in %.2fs\\n\", elapsed);\n}\n\nstatic void purge_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thpurge(void *arg) {\n (void)arg;\n purge_work(0);\n return 0;\n}\n\n// PURGE [ASYNC]\nstatic void cmdPURGE(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thpurge, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, purge_work, purge_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstruct populate_ctx {\n pthread_t th;\n size_t start;\n size_t count;\n char *prefix;\n size_t prefixlen;\n char *val;\n size_t vallen;\n bool randex;\n int randmin;\n int randmax;\n};\n\nstatic void *populate_entry(void *arg) {\n int64_t now = sys_now();\n struct populate_ctx *ctx = arg;\n char *key = xmalloc(ctx->prefixlen+32);\n memcpy(key, ctx->prefix, ctx->prefixlen);\n key[ctx->prefixlen++] = ':';\n for (size_t i = ctx->start; i < ctx->start+ctx->count; i++) {\n size_t n = i64toa(i, (uint8_t*)(key+ctx->prefixlen));\n size_t keylen = ctx->prefixlen+n;\n struct pogocache_store_opts opts = { \n .time = now,\n };\n if (ctx->randex) {\n int ex = (rand()%(ctx->randmax-ctx->randmin))+ctx->randmin;\n opts.ttl = ex*POGOCACHE_SECOND;\n }\n pogocache_store(cache, key, keylen, ctx->val, ctx->vallen, 
&opts);\n }\n xfree(key);\n return 0;\n}\n\n// DEBUG POPULATE [rand-ex-range]\n// DEBUG POPULATE \n// DEBUG POPULATE 1000000 test 16\n// DEBUG POPULATE 1000000 test 16 5-10\nstatic void cmdDEBUG_populate(struct conn *conn, struct args *args) {\n if (args->len != 4 && args->len != 5) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t count;\n if (!argi64(args, 1, &count) || count < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n size_t prefixlen = args->bufs[2].len;\n char *prefix = args->bufs[2].data;\n int64_t vallen;\n if (!argi64(args, 3, &vallen) || vallen < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n bool randex = false;\n int randmin = 0;\n int randmax = 0;\n if (args->len == 5) {\n size_t exlen = args->bufs[4].len;\n char *aex = args->bufs[4].data;\n char *ex = xmalloc(exlen+1);\n memcpy(ex, aex, exlen);\n ex[exlen] = '\\0';\n if (strchr(ex, '-')) {\n randmin = atoi(ex);\n randmax = atoi(strchr(ex, '-')+1);\n randex = true;\n }\n xfree(ex);\n }\n\n char *val = xmalloc(vallen);\n memset(val, 0, vallen);\n int nprocs = sys_nprocs();\n if (nprocs < 0) {\n nprocs = 1;\n }\n struct populate_ctx *ctxs = xmalloc(nprocs*sizeof(struct populate_ctx));\n memset(ctxs, 0, nprocs*sizeof(struct populate_ctx));\n size_t group = count/nprocs;\n size_t start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n ctx->start = start;\n if (i == nprocs-1) {\n ctx->count = count-start;\n } else {\n ctx->count = group;\n }\n ctx->prefix = prefix;\n ctx->prefixlen = prefixlen;\n ctx->val = val;\n ctx->vallen = vallen;\n ctx->randex = randex;\n ctx->randmin = randmin;\n ctx->randmax = randmax;\n if (pthread_create(&ctx->th, 0, populate_entry, ctx) == -1) {\n ctx->th = 0;\n }\n start += group;\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n populate_entry(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = 
&ctxs[i];\n if (ctx->th) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n xfree(val);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"DEBUG POPULATE %\" PRIi64, count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstruct dbg_detach_ctx {\n int64_t now;\n int64_t then;\n};\n\nstatic void detach_work(void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n ctx->then = sys_now();\n // printf(\". ----- DELAY START\\n\");\n // sleep(1);\n // printf(\". ----- DELAY END\\n\");\n}\n\nstatic void detach_done(struct conn *conn, void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n char buf[128];\n snprintf(buf, sizeof(buf), \"%\" PRId64 \":%\" PRId64, ctx->now, ctx->then);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_str_ready(conn, \"detach\", buf, \"DEBUG DETACH\");\n } else {\n conn_write_bulk_cstr(conn, buf);\n }\n xfree(ctx);\n}\n\n// DEBUG detach\nstatic void cmdDEBUG_detach(struct conn *conn, struct args *args) {\n (void)args;\n struct dbg_detach_ctx *ctx = xmalloc(sizeof(struct dbg_detach_ctx));\n memset(ctx, 0,sizeof(struct dbg_detach_ctx));\n ctx->now = sys_now();\n if (!conn_bgwork(conn, detach_work, detach_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx);\n }\n}\n\n// DEBUG subcommand (args...)\nstatic void cmdDEBUG(struct conn *conn, struct args *args) {\n if (args->len <= 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n // args = args[1:]\n args = &(struct args){ .bufs = args->bufs+1, .len = args->len-1 };\n if (argeq(args, 0, \"populate\")) {\n cmdDEBUG_populate(conn, args);\n } else if (argeq(args, 0, \"detach\")) {\n cmdDEBUG_detach(conn, args);\n } else {\n conn_write_error(conn, \"ERR unknown subcommand\");\n }\n}\n\nstatic void cmdECHO(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n 
        pg_write_simple_row_data_ready(conn, "message", args->bufs[1].data, 
            args->bufs[1].len, "ECHO");
    } else {
        conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);
    }
}

// PING [message]
// Replies PONG, or echoes the optional message.
static void cmdPING(struct conn *conn, struct args *args) {
    if (args->len > 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    if (conn_proto(conn) == PROTO_POSTGRES) {
        if (args->len == 1) {
            pg_write_simple_row_str_ready(conn, "message", "PONG", "PING");
        } else {
            pg_write_simple_row_data_ready(conn, "message", args->bufs[1].data,
                args->bufs[1].len, "PING");
        }
    } else {
        if (args->len == 1) {
            conn_write_string(conn, "PONG");
        } else {
            conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);
        }
    }
}

// QUIT
// Acknowledges (RESP only) and closes the connection.
static void cmdQUIT(struct conn *conn, struct args *args) {
    (void)args;
    if (conn_proto(conn) == PROTO_RESP) {
        conn_write_string(conn, "OK");
    }
    conn_close(conn);
}

// TOUCH key [key...]
// A plain load (without notouch) refreshes the entry; replies with the
// number of keys that were found.
static void cmdTOUCH(struct conn *conn, struct args *args) {
    if (args->len < 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int64_t now = sys_now();
    int64_t touched = 0;
    struct pogocache_load_opts opts = { 
        .time = now,
    };
    for (size_t i = 1; i < args->len; i++) {
        stat_cmd_touch_incr(conn);
        const char *key = args->bufs[i].data;
        size_t keylen = args->bufs[i].len;
        int status = pogocache_load(cache, key, keylen, &opts);
        if (status == POGOCACHE_FOUND) {
            stat_touch_hits_incr(conn);
            touched++;
        } else {
            stat_touch_misses_incr(conn);
        }
    }
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_completef(conn, "TOUCH %" PRIi64, touched);
        pg_write_ready(conn, 'I');
    } else {
        conn_write_int(conn, touched);
    }
}

// Captures the current numeric value and metadata of an entry for INCR/DECR.
struct get64ctx {
    bool ok;           // value parsed as a 64-bit integer
    bool isunsigned;   // parse as uint64 instead of int64
    union {
        int64_t ival;
        uint64_t uval;
    };
    int64_t expires;
    uint32_t flags;
    uint64_t cas;
};

// Signed/unsigned view of an increment/decrement amount.
union delta { 
    uint64_t u;
    int64_t i;
};

// Entry callback for INCR/DECR: snapshots metadata and parses the stored
// value as a 64-bit integer into the get64ctx.
static void get64(int shard, int64_t time, const void *key,
    size_t keylen, const void *val, size_t vallen, int64_t expires,
    uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)
{
    (void)shard, (void)time, (void)key, (void)keylen, (void)update;
    struct get64ctx *ctx = udata;
    ctx->flags = flags;
    ctx->expires = expires;
    ctx->cas = cas;
    if (ctx->isunsigned) {
        ctx->ok = parse_u64(val, vallen, &ctx->uval);
    } else {
        ctx->ok = parse_i64(val, vallen, &ctx->ival);
    }
}

// Shared implementation of INCR/DECR/INCRBY/DECRBY (signed and unsigned).
// Loads the current value, applies the delta with overflow detection, and
// stores the result back — all inside a batch for key isolation.
// Memcache semantics differ: a missing key replies NOT_FOUND rather than
// starting from zero, and overflow is not treated as an error.
static void execINCRDECR(struct conn *conn, const char *key, size_t keylen, 
    union delta delta, bool decr, bool isunsigned, const char *cmdname)
{
    bool hit = false;
    bool miss = false;
    int64_t now = sys_now();
    struct get64ctx ctx = { .isunsigned = isunsigned };
    struct pogocache *batch = pogocache_begin(cache);
    struct pogocache_load_opts gopts = {
        .time = now,
        .entry = get64,
        .udata = &ctx,
    };
    int status = pogocache_load(batch, key, keylen, &gopts);
    bool found = status == POGOCACHE_FOUND;
    if (found && !ctx.ok) {
        // exists but is not an integer
        if (conn_proto(conn) == PROTO_MEMCACHE) {
            conn_write_raw_cstr(conn, "CLIENT_ERROR cannot increment or "
                "decrement non-numeric value\r\n");
            goto done;
        }
        goto fail_value_non_numeric;
    } else if (!found && conn_proto(conn) == PROTO_MEMCACHE) {
        miss = true;
        conn_write_raw_cstr(conn, "NOT_FOUND\r\n");
        goto done;
    }
    // add or subtract
    // (when not found, ctx value is zero-initialized, so the result is the
    // delta itself)
    bool overflow;
    if (isunsigned) {
        if (decr) {
            overflow = __builtin_sub_overflow(ctx.uval, delta.u, &ctx.uval);
        } else {
            overflow = __builtin_add_overflow(ctx.uval, delta.u, &ctx.uval);
        }
    } else {
        if (decr) {
            overflow = __builtin_sub_overflow(ctx.ival, delta.i, &ctx.ival);
        } else {
            overflow = __builtin_add_overflow(ctx.ival, delta.i, &ctx.ival);
        }
    }
    if (overflow && conn_proto(conn) != PROTO_MEMCACHE) {
        goto fail_overflow;
    }
    // re-set the value
    char val[24];
    size_t vallen;
    if (isunsigned) {
        vallen = u64toa(ctx.uval, (uint8_t*)val);
    } else {
        vallen = i64toa(ctx.ival, (uint8_t*)val);
    }
    // preserve the original expiration, flags, and cas
    struct pogocache_store_opts sopts = {
        .time = now,
        .expires = ctx.expires, 
        .flags = ctx.flags, 
        .cas = ctx.cas,
        .udata = &ctx,
    };
    status = pogocache_store(batch, key, keylen, val, vallen, &sopts);
    if (status == POGOCACHE_NOMEM) {
        stat_store_no_memory_incr(conn);
        conn_write_error(conn, ERR_OUT_OF_MEMORY);
        goto done;
    }
    assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);
    if (conn_proto(conn) == PROTO_POSTGRES) {
        char val[24];
        if (isunsigned) {
            snprintf(val, sizeof(val), "%" PRIu64, ctx.uval);
        } else {
            snprintf(val, sizeof(val), "%" PRIi64, ctx.ival);
        }
        pg_write_simple_row_str_readyf(conn, "value", val, "%s", cmdname);
    } else {
        if (isunsigned) {
            conn_write_uint(conn, ctx.uval);
        } else {
            conn_write_int(conn, ctx.ival);
        }
    }
    hit = true;
    goto done;
fail_value_non_numeric:
    conn_write_error(conn, ERR_INVALID_INTEGER);
    goto done;
fail_overflow:
    conn_write_error(conn, "ERR increment or decrement would overflow");
    goto done;
done:
    // stat accounting happens exactly once, on the way out
    if (hit) {
        if (decr) {
            stat_decr_hits_incr(conn);
        } else {
            stat_incr_hits_incr(conn);
        }
    } else if (miss) {
        if (decr) {
            stat_decr_misses_incr(conn);
        } else {
            stat_incr_misses_incr(conn);
        }
    }
    pogocache_end(batch);
}

// Shared argument parsing for INCRBY/DECRBY (and unsigned u* variants,
// detected by the command's leading 'u').
static void cmdINCRDECRBY(struct conn *conn, struct args *args, 
    bool decr, const char *cmdname)
{
    if (args->len != 3) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta;
    bool ok;
    if (isunsigned) {
        ok = argu64(args, 2, &delta.u);
    } else {
        ok = argi64(args, 2, &delta.i);
    }
    if (!ok) {
        if (conn_proto(conn) == PROTO_MEMCACHE) {
            conn_write_raw_cstr(conn, "CLIENT_ERROR invalid numeric delta "
                "argument\r\n");
        } else {
            conn_write_error(conn, ERR_INVALID_INTEGER);
        }
        return;
    }
    execINCRDECR(conn, key, keylen, delta, decr, 
        isunsigned, cmdname);
}

// DECRBY key num
static void cmdDECRBY(struct conn *conn, struct args *args) {
    cmdINCRDECRBY(conn, args, true, "DECRBY");
}

// INCRBY key num
static void cmdINCRBY(struct conn *conn, struct args *args) {
    cmdINCRDECRBY(conn, args, false, "INCRBY");
}

// DECR key
static void cmdDECR(struct conn *conn, struct args *args) {
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    // a leading 'u' in the command name selects unsigned arithmetic
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta = { .i = 1 };
    execINCRDECR(conn, key, keylen, delta, true, isunsigned, "DECR");
}

// INCR key
static void cmdINCR(struct conn *conn, struct args *args) {
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta = { .i = 1 };
    execINCRDECR(conn, key, keylen, delta, false, isunsigned, "INCR");
}

// Context for APPEND/PREPEND: carries the caller's value in, and the
// concatenated result (heap-allocated in append_entry) out.
struct appendctx {
    bool prepend;        // prepend instead of append
    uint32_t flags;      // preserved from the existing entry
    int64_t expires;     // preserved from the existing entry
    const char *val;     // value to attach (not owned)
    size_t vallen;
    char *outval;        // concatenation result, owned by caller after return
    size_t outvallen;
};

// Entry callback for APPEND/PREPEND: builds the concatenated value into a
// freshly allocated buffer (ctx->outval) and snapshots entry metadata.
static void append_entry(int shard, int64_t time, const void *key,
    size_t keylen, const void *val, size_t vallen, int64_t expires, 
    uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)
{
    (void)shard, (void)time, (void)key, (void)keylen, (void)update, (void)cas;
    struct appendctx *ctx = udata;
    ctx->expires = expires;
    ctx->flags = flags;
    ctx->outvallen = vallen+ctx->vallen;
    ctx->outval = xmalloc(ctx->outvallen);
    if (ctx->prepend) {
        memcpy(ctx->outval, ctx->val, ctx->vallen);
        memcpy(ctx->outval+ctx->vallen, val, vallen);
    } else {
        memcpy(ctx->outval, val, vallen);
        memcpy(ctx->outval+vallen, ctx->val, ctx->vallen);
    }
}

// APPEND key value
// Also serves PREPEND (dispatched by argv[0], see the argeq below).
// A missing key stores the value as-is (except memcache: NOT_STORED).
static void cmdAPPEND(struct conn *conn, struct args *args) {
    int64_t now = 
sys_now();
    if (args->len != 3) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int proto = conn_proto(conn);
    bool prepend = argeq(args, 0, "prepend");
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    size_t vallen;
    const char *val = args_at(args, 2, &vallen);
    struct appendctx ctx = { 
        .prepend = prepend,
        .val = val,
        .vallen = vallen,
    };
    size_t len;
    // Use a batch transaction for key isolation.
    struct pogocache *batch = pogocache_begin(cache);
    struct pogocache_load_opts lopts = { 
        .time = now,
        .entry = append_entry,
        .udata = &ctx,
    };
    int status = pogocache_load(batch, key, keylen, &lopts);
    if (status == POGOCACHE_NOTFOUND) {
        if (proto == PROTO_MEMCACHE) {
            conn_write_raw_cstr(conn, "NOT_STORED\r\n");
            goto done;
        }
        len = vallen;
        struct pogocache_store_opts sopts = {
            .time = now,
        };
        status = pogocache_store(batch, key, keylen, val, vallen, &sopts);
    } else {
        if (ctx.outvallen > MAXARGSZ) {
            // do not let values become larger than 500MB
            xfree(ctx.outval);
            conn_write_error(conn, "ERR value too large");
            goto done;
        }
        len = ctx.outvallen;
        // preserve the original expiration and flags on the rewritten entry
        struct pogocache_store_opts sopts = {
            .time = now,
            .expires = ctx.expires,
            .flags = ctx.flags,
        };
        status = pogocache_store(batch, key, keylen, ctx.outval, ctx.outvallen, 
            &sopts);
        xfree(ctx.outval);
    }
    if (status == POGOCACHE_NOMEM) {
        conn_write_error(conn, ERR_OUT_OF_MEMORY);
        goto done;
    }
    assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);
    if (proto == PROTO_POSTGRES) {
        pg_write_completef(conn, "%s %zu", prepend?"PREPEND":"APPEND", len);
        pg_write_ready(conn, 'I');
    } else if (proto == PROTO_MEMCACHE) {
        conn_write_raw_cstr(conn, "STORED\r\n");
    } else {
        conn_write_int(conn, len);
    }
done:
    pogocache_end(batch);
}

// PREPEND key value — same handler; cmdAPPEND checks argv[0] for "prepend".
static void cmdPREPEND(struct conn *conn, struct args *args) {
    cmdAPPEND(conn, args);
}

// AUTH password
// Compares against the configured `auth` password and marks the connection
// authenticated on success.
static void cmdAUTH(struct conn *conn, struct args 
*args) {
    stat_auth_cmds_incr(0);
    if (!argeq(args, 0, "auth")) {
        stat_auth_errors_incr(0);
        goto noauth;
    }
    if (args->len == 3) {
        // two-argument (username password) form is always rejected —
        // appears intentional: username auth is unsupported here
        stat_auth_errors_incr(0);
        goto wrongpass;
    }
    if (args->len > 3) {
        stat_auth_errors_incr(0);
        conn_write_error(conn, ERR_SYNTAX_ERROR);
        return;
    }
    if (args->len == 1) {
        stat_auth_errors_incr(0);
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    if (args->bufs[1].len != strlen(auth) || 
        memcmp(auth, args->bufs[1].data, args->bufs[1].len) != 0)
    {
        stat_auth_errors_incr(0);
        goto wrongpass;
    }
    conn_setauth(conn, true);
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_complete(conn, "AUTH OK");
        pg_write_ready(conn, 'I');
    } else {
        conn_write_string(conn, "OK");
    }
    return;
noauth:
    if (conn_proto(conn) == PROTO_MEMCACHE) {
        conn_write_raw_cstr(conn, 
            "CLIENT_ERROR Authentication required\r\n");
    } else {
        conn_write_error(conn, "NOAUTH Authentication required.");
    }
    return;
wrongpass:
    conn_write_error(conn, 
        "WRONGPASS invalid username-password pair or user is disabled.");
}

struct stats {
    // use the args type as a list.
    struct args args;
};

// Resets the stats accumulator.
static void stats_begin(struct stats *stats) {
    memset(stats, 0, sizeof(struct stats));
}

// Writes the accumulated "name value" stat lines to the client in the
// connection's protocol.
static void stats_end(struct stats *stats, struct conn *conn) {
    if (conn_proto(conn) == PROTO_POSTGRES) {
        pg_write_row_desc(conn, (const char*[]){ "stat", "value" }, 2);
        for (size_t i = 0; i < stats->args.len; i++) {
            // stat and key alias the same buffer; writing '\0' at the first
            // space splits it into the name (stat) and the value (val)
            char *stat = stats->args.bufs[i].data;
            char *key = stats->args.bufs[i].data;
            char *space = strchr(key, ' ');
            char *val = "";
            if (space) {
                *space = '\0';
                val = space+1;
            }
            pg_write_row_data(conn, (const char*[]){ stat, val }, 
                (size_t[]){ strlen(stat), strlen(val) }, 2);
        }
        pg_write_completef(conn, "STATS %zu", stats->args.len);
        pg_write_ready(conn, 'I');
    } else if (conn_proto(conn) == PROTO_MEMCACHE) {
        char line[512];
        for (size_t i = 0; i < 
stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n size_t n = snprintf(line, sizeof(line), \"STAT %s\\r\\n\", stat);\n conn_write_raw(conn, line, n);\n }\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n } else {\n conn_write_array(conn, stats->args.len);\n for (size_t i = 0; i < stats->args.len; i++) {\n conn_write_array(conn, 2);\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n conn_write_bulk_cstr(conn, key);\n conn_write_bulk_cstr(conn, val);\n }\n }\n args_free(&stats->args);\n}\n\nstatic void stats_printf(struct stats *stats, const char *format, ...) {\n // initializing list pointer\n char line[512];\n va_list ap;\n va_start(ap, format);\n size_t len = vsnprintf(line, sizeof(line)-1, format, ap);\n va_end(ap);\n args_append(&stats->args, line, len+1, false); // include null-terminator\n}\n\nstatic void stats(struct conn *conn) {\n struct stats stats;\n stats_begin(&stats);\n stats_printf(&stats, \"pid %d\", getpid());\n stats_printf(&stats, \"uptime %.0f\", (sys_now()-procstart)/1e9);\n stats_printf(&stats, \"time %.0f\", sys_unixnow()/1e9);\n stats_printf(&stats, \"product %s\", \"pogocache\");\n stats_printf(&stats, \"version %s\", version);\n stats_printf(&stats, \"githash %s\", githash);\n stats_printf(&stats, \"pointer_size %zu\", sizeof(uintptr_t)*8);\n struct rusage usage;\n if (getrusage(RUSAGE_SELF, &usage) == 0) {\n stats_printf(&stats, \"rusage_user %ld.%06ld\",\n usage.ru_utime.tv_sec, usage.ru_utime.tv_usec);\n stats_printf(&stats, \"rusage_system %ld.%06ld\",\n usage.ru_stime.tv_sec, usage.ru_stime.tv_usec);\n }\n stats_printf(&stats, \"max_connections %zu\", maxconns);\n stats_printf(&stats, \"curr_connections %zu\", net_nconns());\n stats_printf(&stats, \"total_connections %zu\", net_tconns());\n stats_printf(&stats, \"rejected_connections %zu\", net_rconns());\n stats_printf(&stats, \"cmd_get %\" PRIu64, stat_cmd_get());\n 
stats_printf(&stats, \"cmd_set %\" PRIu64, stat_cmd_set());\n stats_printf(&stats, \"cmd_flush %\" PRIu64, stat_cmd_flush());\n stats_printf(&stats, \"cmd_touch %\" PRIu64, stat_cmd_touch());\n stats_printf(&stats, \"get_hits %\" PRIu64, stat_get_hits());\n stats_printf(&stats, \"get_misses %\" PRIu64, stat_get_misses());\n stats_printf(&stats, \"delete_misses %\" PRIu64, stat_delete_misses());\n stats_printf(&stats, \"delete_hits %\" PRIu64, stat_delete_hits());\n stats_printf(&stats, \"incr_misses %\" PRIu64, stat_incr_misses());\n stats_printf(&stats, \"incr_hits %\" PRIu64, stat_incr_hits());\n stats_printf(&stats, \"decr_misses %\" PRIu64, stat_decr_misses());\n stats_printf(&stats, \"decr_hits %\" PRIu64, stat_decr_hits());\n stats_printf(&stats, \"touch_hits %\" PRIu64, stat_touch_hits());\n stats_printf(&stats, \"touch_misses %\" PRIu64, stat_touch_misses());\n stats_printf(&stats, \"store_too_large %\" PRIu64, stat_store_too_large());\n stats_printf(&stats, \"store_no_memory %\" PRIu64, stat_store_no_memory());\n stats_printf(&stats, \"auth_cmds %\" PRIu64, stat_auth_cmds());\n stats_printf(&stats, \"auth_errors %\" PRIu64, stat_auth_errors());\n stats_printf(&stats, \"threads %d\", nthreads);\n struct sys_meminfo meminfo;\n sys_getmeminfo(&meminfo);\n stats_printf(&stats, \"rss %zu\", meminfo.rss);\n struct pogocache_size_opts sopts = { .entriesonly=true };\n stats_printf(&stats, \"bytes %zu\", pogocache_size(cache, &sopts));\n stats_printf(&stats, \"curr_items %zu\", pogocache_count(cache, 0));\n stats_printf(&stats, \"total_items %\" PRIu64, pogocache_total(cache, 0));\n stats_end(&stats, conn);\n}\n\nstatic void cmdSTATS(struct conn *conn, struct args *args) {\n if (args->len == 1) {\n return stats(conn);\n }\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\n// Commands hash table. 
Lazy loaded per thread.\n// Simple open addressing using case-insensitive fnv1a hashes.\nstatic int nbuckets;\nstatic struct cmd *buckets;\n\nstruct cmd {\n const char *name;\n void (*func)(struct conn *conn, struct args *args);\n};\n\nstatic struct cmd cmds[] = {\n { \"set\", cmdSET }, // pg\n { \"get\", cmdGET }, // pg\n { \"del\", cmdDEL }, // pg\n { \"mget\", cmdMGET }, // pg\n { \"mgets\", cmdMGET }, // pg cas detected\n { \"ttl\", cmdTTL }, // pg\n { \"pttl\", cmdTTL }, // pg\n { \"expire\", cmdEXPIRE }, // pg\n { \"setex\", cmdSETEX }, // pg\n { \"dbsize\", cmdDBSIZE }, // pg\n { \"quit\", cmdQUIT }, // pg\n { \"echo\", cmdECHO }, // pg\n { \"exists\", cmdEXISTS }, // pg\n { \"flushdb\", cmdFLUSHALL }, // pg\n { \"flushall\", cmdFLUSHALL }, // pg\n { \"flush\", cmdFLUSHALL }, // pg\n { \"purge\", cmdPURGE }, // pg\n { \"sweep\", cmdSWEEP }, // pg\n { \"keys\", cmdKEYS }, // pg\n { \"ping\", cmdPING }, // pg\n { \"touch\", cmdTOUCH }, // pg\n { \"debug\", cmdDEBUG }, // pg\n { \"incrby\", cmdINCRBY }, // pg\n { \"decrby\", cmdDECRBY }, // pg\n { \"incr\", cmdINCR }, // pg\n { \"decr\", cmdDECR }, // pg\n { \"uincrby\", cmdINCRBY }, // pg unsigned detected in signed operation\n { \"udecrby\", cmdDECRBY }, // pg unsigned detected in signed operation\n { \"uincr\", cmdINCR }, // pg unsigned detected in signed operation\n { \"udecr\", cmdDECR }, // pg unsigned detected in signed operation\n { \"append\", cmdAPPEND }, // pg\n { \"prepend\", cmdPREPEND }, // pg\n { \"auth\", cmdAUTH }, // pg\n { \"save\", cmdSAVELOAD }, // pg\n { \"load\", cmdSAVELOAD }, // pg\n { \"stats\", cmdSTATS }, // pg memcache style stats\n};\n\nstatic void build_commands_table(void) {\n static __thread bool buckets_ready = false;\n static pthread_mutex_t cmd_build_lock = PTHREAD_MUTEX_INITIALIZER;\n static bool built = false;\n if (!buckets_ready) {\n pthread_mutex_lock(&cmd_build_lock);\n if (!built) {\n int ncmds = sizeof(cmds)/sizeof(struct cmd);\n int n = ncmds*8;\n nbuckets = 2;\n 
while (nbuckets < n) {\n nbuckets *= 2;\n }\n buckets = xmalloc(nbuckets*sizeof(struct cmd));\n memset(buckets, 0, nbuckets*sizeof(struct cmd));\n uint64_t hash;\n for (int i = 0; i < ncmds; i++) {\n hash = fnv1a_case(cmds[i].name, strlen(cmds[i].name));\n for (int j = 0; j < nbuckets; j++) {\n int k = (j+hash)&(nbuckets-1);\n if (!buckets[k].name) {\n buckets[k] = cmds[i];\n break;\n }\n }\n }\n built = true;\n }\n pthread_mutex_unlock(&cmd_build_lock);\n buckets_ready = true;\n }\n}\n\nstatic struct cmd *get_cmd(const char *name, size_t namelen) {\n build_commands_table();\n uint32_t hash = fnv1a_case(name, namelen);\n int j = hash&(nbuckets-1);\n while (1) {\n if (!buckets[j].name) {\n return 0;\n }\n if (argeq_bytes(name, namelen, buckets[j].name)) {\n return &buckets[j];\n }\n j++;\n }\n}\n\nvoid evcommand(struct conn *conn, struct args *args) {\n if (useauth && !conn_auth(conn)) {\n if (conn_proto(conn) == PROTO_HTTP) {\n // Let HTTP traffic through.\n // The request has already been authorized in http.c\n } else {\n cmdAUTH(conn, args);\n return;\n }\n }\n if (verb > 1) {\n if (!argeq(args, 0, \"auth\")) {\n args_print(args);\n }\n }\n struct cmd *cmd = get_cmd(args->bufs[0].data, args->bufs[0].len);\n if (cmd) {\n cmd->func(conn, args);\n } else {\n if (verb > 0) {\n printf(\"# Unknown command '%.*s'\\n\", (int)args->bufs[0].len,\n args->bufs[0].data);\n }\n char errmsg[128];\n snprintf(errmsg, sizeof(errmsg), \"ERR unknown command '%.*s'\", \n (int)args->bufs[0].len, args->bufs[0].data);\n conn_write_error(conn, errmsg);\n }\n}\n"], ["/pogocache/src/conn.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit conn.c are interface functions for a network connection.\n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"args.h\"\n#include \"cmds.h\"\n#include \"xmalloc.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"helppage.h\"\n\n#define MAXPACKETSZ 1048576 // Maximum read packet size\n\nstruct conn {\n struct net_conn *conn5; // originating connection\n struct buf packet; // current incoming packet\n int proto; // connection protocol (memcache, http, etc)\n bool auth; // user is authorized\n bool noreply; // only for memcache\n bool keepalive; // only for http\n int httpvers; // only for http\n struct args args; // command args, if any\n struct pg *pg; // postgres context, only if proto is postgres\n};\n\nbool conn_istls(struct conn *conn) {\n return net_conn_istls(conn->conn5);\n}\n\nint conn_proto(struct conn *conn) {\n return conn->proto;\n}\n\nbool conn_auth(struct conn *conn) {\n return conn->auth;\n}\n\nvoid conn_setauth(struct conn *conn, bool ok) {\n conn->auth = ok;\n}\n\nbool conn_isclosed(struct conn *conn) {\n return net_conn_isclosed(conn->conn5);\n}\n\nvoid conn_close(struct conn *conn) {\n net_conn_close(conn->conn5);\n}\n\nvoid evopened(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = xmalloc(sizeof(struct conn));\n memset(conn, 0, sizeof(struct conn));\n conn->conn5 = conn5;\n net_conn_setudata(conn5, conn);\n}\n\nvoid evclosed(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n buf_clear(&conn->packet);\n args_free(&conn->args);\n pg_free(conn->pg);\n xfree(conn);\n}\n\n// network data handler\n// The evlen may be zero when returning from a bgwork 
routine, while having\n// existing data in the connection packet.\nvoid evdata(struct net_conn *conn5, const void *evdata, size_t evlen,\n void *udata)\n{\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n if (conn_isclosed(conn)) {\n goto close;\n }\n#ifdef DATASETOK\n if (evlen == 14 && memcmp(evdata, \"*1\\r\\n$4\\r\\nPING\\r\\n\", 14) == 0) {\n conn_write_raw(conn, \"+PONG\\r\\n\", 7);\n } else if (evlen == 13 && memcmp(evdata, \"*2\\r\\n$3\\r\\nGET\\r\\n\", 13) == 0) {\n conn_write_raw(conn, \"$1\\r\\nx\\r\\n\", 7);\n } else {\n conn_write_raw(conn, \"+OK\\r\\n\", 5);\n }\n return;\n#endif\n char *data;\n size_t len;\n bool copied;\n if (conn->packet.len == 0) {\n data = (char*)evdata;\n len = evlen;\n copied = false;\n } else {\n buf_append(&conn->packet, evdata, evlen);\n len = conn->packet.len;\n data = conn->packet.data;\n copied = true;\n }\n while (len > 0 && !conn_isclosed(conn)) {\n // Parse the command\n ssize_t n = parse_command(data, len, &conn->args, &conn->proto, \n &conn->noreply, &conn->httpvers, &conn->keepalive, &conn->pg);\n if (n == 0) {\n // Not enough data provided yet.\n break;\n } else if (n == -1) {\n // Protocol error occurred.\n conn_write_error(conn, parse_lasterror());\n if (conn->proto == PROTO_MEMCACHE) {\n // Memcache doesn't close, but we'll need to know the last\n // character position to continue and revert back to it so\n // we can attempt to continue to the next command.\n n = parse_lastmc_n();\n } else {\n // Close on protocol error\n conn_close(conn);\n break;\n }\n } else if (conn->args.len == 0) {\n // There were no command arguments provided.\n if (conn->proto == PROTO_POSTGRES) {\n if (!pg_respond(conn, conn->pg)) {\n // close connection\n conn_close(conn);\n break;\n }\n } else if (conn->proto == PROTO_MEMCACHE) {\n // Memcache simply returns a nondescript error.\n conn_write_error(conn, \"ERROR\");\n } else if (conn->proto == PROTO_HTTP) {\n // HTTP must always return arguments.\n 
assert(!\"PROTO_HTTP\");\n } else if (conn->proto == PROTO_RESP) {\n // RESP just continues until it gets args.\n }\n } else if (conn->proto == PROTO_POSTGRES && !conn->pg->ready) {\n // This should not have been reached. The client did not \n // send a startup message\n conn_close(conn);\n break;\n } else if (conn->proto != PROTO_POSTGRES || \n pg_precommand(conn, &conn->args, conn->pg))\n {\n evcommand(conn, &conn->args);\n }\n len -= n;\n data += n;\n if (net_conn_bgworking(conn->conn5)) {\n // BGWORK(0)\n break;\n }\n if (conn->proto == PROTO_HTTP) {\n conn_close(conn);\n }\n }\n if (conn_isclosed(conn)) {\n goto close;\n }\n if (len == 0) {\n if (copied) {\n if (conn->packet.cap > MAXPACKETSZ) {\n buf_clear(&conn->packet);\n }\n conn->packet.len = 0;\n }\n } else {\n if (copied) {\n memmove(conn->packet.data, data, len);\n conn->packet.len = len;\n } else {\n buf_append(&conn->packet, data, len);\n }\n }\n return;\nclose:\n conn_close(conn);\n}\n\nstruct bgworkctx {\n struct conn *conn;\n void *udata;\n void(*work)(void *udata);\n void(*done)(struct conn *conn, void *udata);\n};\n\nstatic void work5(void *udata) {\n struct bgworkctx *ctx = udata;\n ctx->work(ctx->udata);\n}\n\nstatic void done5(struct net_conn *conn, void *udata) {\n (void)conn;\n struct bgworkctx *ctx = udata;\n ctx->done(ctx->conn, ctx->udata);\n xfree(ctx);\n}\n\n// conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool conn_bgwork(struct conn *conn, void(*work)(void *udata), \n void(*done)(struct conn *conn, void *udata), void *udata)\n{\n struct bgworkctx *ctx = xmalloc(sizeof(struct bgworkctx));\n ctx->conn = conn;\n ctx->udata = udata;\n ctx->work = work;\n ctx->done = done;\n if (!net_conn_bgwork(conn->conn5, work5, done5, ctx)) {\n xfree(ctx);\n return false;\n }\n return true;\n}\n\nstatic void writeln(struct conn *conn, char ch, const void *data, ssize_t len) {\n if 
(len < 0) {\n len = strlen(data);\n }\n net_conn_out_ensure(conn->conn5, 3+len);\n net_conn_out_write_byte_nocheck(conn->conn5, ch);\n size_t mark = net_conn_out_len(conn->conn5);\n net_conn_out_write_nocheck(conn->conn5, data, len);\n net_conn_out_write_byte_nocheck(conn->conn5, '\\r');\n net_conn_out_write_byte_nocheck(conn->conn5, '\\n');\n uint8_t *out = (uint8_t*)net_conn_out(conn->conn5);\n for (ssize_t i = mark; i < len; i++) {\n if (out[i] < ' ') {\n out[i] = ' ';\n }\n }\n}\n\nstatic void write_error(struct conn *conn, const char *err, bool server) {\n if (conn->proto == PROTO_MEMCACHE) {\n if (strstr(err, \"ERR \") == err) {\n // convert to client or server error\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n if (server) {\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err+4); \n } else {\n snprintf(err2, err2sz, \"CLIENT_ERROR %s\\r\\n\", err+4); \n }\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n if (server) {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else if (strstr(err, \"CLIENT_ERROR \") == err || \n strstr(err, \"CLIENT_ERROR \") == err)\n {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"%s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n conn_write_raw(conn, \"ERROR\\r\\n\", 7);\n }\n }\n } else if (conn->proto == PROTO_POSTGRES) {\n if (strstr(err, \"ERR \") == err) {\n err = err+4;\n }\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n } else if (conn->proto == PROTO_HTTP) {\n if (strstr(err, \"ERR \") == err) {\n err += 4;\n }\n if (strcmp(err, \"Show Help HTML\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_HTML, -1);\n } else if (strcmp(err, \"Show Help TEXT\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_TEXT, -1);\n } else if (strcmp(err, 
\"Method Not Allowed\") == 0) {\n conn_write_http(conn, 405, \"Method Not Allowed\", \n \"Method Not Allowed\\r\\n\", -1);\n } else if (strcmp(err, \"Unauthorized\") == 0) {\n conn_write_http(conn, 401, \"Unauthorized\", \n \"Unauthorized\\r\\n\", -1);\n } else if (strcmp(err, \"Bad Request\") == 0) {\n conn_write_http(conn, 400, \"Bad Request\", \n \"Bad Request\\r\\n\", -1);\n } else {\n size_t sz = strlen(err)+32;\n char *err2 = xmalloc(sz);\n snprintf(err2, sz, \"ERR %s\\r\\n\", err);\n conn_write_http(conn, 500, \"Internal Server Error\", \n err2, -1);\n xfree(err2);\n }\n } else {\n writeln(conn, '-', err, -1);\n }\n}\n\nvoid conn_write_error(struct conn *conn, const char *err) {\n bool server = false;\n if (strcmp(err, ERR_OUT_OF_MEMORY) == 0) {\n server = true;\n }\n write_error(conn, err, server);\n}\n\nvoid conn_write_string(struct conn *conn, const char *cstr) {\n writeln(conn, '+', cstr, -1);\n}\n\nvoid conn_write_null(struct conn *conn) {\n net_conn_out_write(conn->conn5, \"$-1\\r\\n\", 5);\n}\n\nvoid resp_write_bulk(struct buf *buf, const void *data, size_t len) {\n uint8_t str[32];\n size_t n = u64toa(len, str);\n buf_append_byte(buf, '$');\n buf_append(buf, str, n);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n buf_append(buf, data, len);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n}\n\nvoid conn_write_bulk(struct conn *conn, const void *data, size_t len) {\n net_conn_out_ensure(conn->conn5, 32+len);\n size_t olen = net_conn_out_len(conn->conn5);\n uint8_t *base = (uint8_t*)net_conn_out(conn->conn5)+olen;\n uint8_t *p = base;\n *(p++) = '$';\n p += u64toa(len, p);\n *(p++) = '\\r';\n *(p++) = '\\n';\n memcpy(p, data, len);\n p += len;\n *(p++) = '\\r';\n *(p++) = '\\n';\n net_conn_out_setlen(conn->conn5, olen + (p-base));\n}\n\nvoid conn_write_raw(struct conn *conn, const void *data, size_t len) {\n net_conn_out_write(conn->conn5, data, len);\n}\n\nvoid conn_write_http(struct conn *conn, int code, const char 
*status,\n const void *body, ssize_t bodylen)\n{\n if (bodylen == -1) {\n if (!body) {\n body = status;\n }\n bodylen = strlen(body);\n }\n char resp[512];\n size_t n = snprintf(resp, sizeof(resp), \n \"HTTP/1.1 %d %s\\r\\n\"\n \"Content-Length: %zu\\r\\n\"\n \"Connection: Close\\r\\n\"\n \"\\r\\n\",\n code, status, bodylen);\n conn_write_raw(conn, resp, n);\n if (bodylen > 0) {\n conn_write_raw(conn, body, bodylen);\n }\n}\n\nvoid conn_write_array(struct conn *conn, size_t count) {\n uint8_t str[24];\n size_t n = u64toa(count, str);\n writeln(conn, '*', str, n);\n}\n\nvoid conn_write_uint(struct conn *conn, uint64_t value) {\n uint8_t buf[24];\n size_t n = u64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, '+', buf, n); // the '+' is needed for unsigned int\n }\n}\n\nvoid conn_write_int(struct conn *conn, int64_t value) {\n uint8_t buf[24];\n size_t n = i64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, ':', buf, n);\n }\n}\n\nvoid conn_write_raw_cstr(struct conn *conn, const char *cstr) {\n conn_write_raw(conn, cstr, strlen(cstr));\n}\n\nvoid conn_write_bulk_cstr(struct conn *conn, const char *cstr) {\n conn_write_bulk(conn, cstr, strlen(cstr));\n}\n\nvoid stat_cmd_get_incr(struct conn *conn) {\n net_stat_cmd_get_incr(conn->conn5);\n}\n\nvoid stat_cmd_set_incr(struct conn *conn) {\n net_stat_cmd_set_incr(conn->conn5);\n}\n\nvoid stat_get_hits_incr(struct conn *conn) {\n net_stat_get_hits_incr(conn->conn5);\n}\n\nvoid stat_get_misses_incr(struct conn *conn) {\n net_stat_get_misses_incr(conn->conn5);\n}\n\nbool pg_execute(struct conn *conn) {\n return conn->pg->execute;\n}\n\nstruct pg *conn_pg(struct conn *conn) {\n return conn->pg;\n}\n"], ["/pogocache/src/pogocache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit pogocache.c is the primary caching engine library, which is designed\n// to be standalone and embeddable.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"pogocache.h\"\n\n#define MINLOADFACTOR_RH 55 // 55%\n#define MAXLOADFACTOR_RH 95 // 95%\n#define DEFLOADFACTOR 75 // 75%\n#define SHRINKAT 10 // 10%\n#define DEFSHARDS 4096 // default number of shards\n#define INITCAP 64 // intial number of buckets per shard\n\n// #define DBGCHECKENTRY\n// #define EVICTONITER\n// #define HALFSECONDTIME\n// #define NO48BITPTRS\n\n#if INTPTR_MAX == INT64_MAX\n#ifdef NO48BITPTRS\n#define PTRSIZE 8\n#else\n#define PTRSIZE 6\n#endif\n#elif INTPTR_MAX == INT32_MAX\n#define PTRSIZE 4\n#else\n#error Unknown pointer size\n#endif\n\nstatic struct pogocache_count_opts defcountopts = { 0 };\nstatic struct pogocache_total_opts deftotalopts = { 0 };\nstatic struct pogocache_size_opts defsizeopts = { 0 };\nstatic struct pogocache_sweep_opts defsweepopts = { 0 };\nstatic struct pogocache_clear_opts defclearopts = { 0 };\nstatic struct pogocache_store_opts defstoreopts = { 0 };\nstatic struct pogocache_load_opts defloadopts = { 0 };\nstatic struct pogocache_delete_opts defdeleteopts = { 0 };\nstatic struct pogocache_iter_opts defiteropts = { 0 };\nstatic struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// returns monotonic nanoseconds of the CPU clock.\nstatic int64_t gettime(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif 
defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// returns offset of system clock since first call in thread.\nstatic int64_t getnow(void) {\n return gettime();\n}\n\n// https://github.com/tidwall/th64\nstatic uint64_t th64(const void *data, size_t len, uint64_t seed) {\n uint8_t*p=(uint8_t*)data,*e=p+len;\n uint64_t r=0x14020a57acced8b7,x,h=seed;\n while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;\n while(p>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);\n}\n\n// Load a pointer from an unaligned memory.\nstatic void *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\n// Store a pointer into unaligned memory.\nstatic void store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nstatic uint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\n// Sixpack compression algorithm\n// 
- Converts a simple 8-bit string into 6-bit string.\n// - Intended to be used on small strings that only use characters commonly\n// used for keys in KV data stores.\n// - Allows the following 64 item character set:\n// -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy\n// Note that the characters \"QZz\" are not included.\n// - Sortable and comparable using memcmp.\nstatic char tosix[256] = {\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0-15\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, // 32-47\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, // 48-63\n 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79\n 29, 0, 30, 31, 32, 33, 34, 35, 36, 37, 0, 0, 0, 0, 0, 38, // 80-95\n 0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111\n 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 112-127\n};\n\nstatic char fromsix[] = {\n 0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\n 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',\n 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'\n};\n\n// 0: [000000..] bitpos: 0\n// 1: [00000011][1111....] bitpos: 6\n// 2: [00000011][11112222][22......] 
bitpos: 12 \n// 3: [00000011][11112222][22333333] bitpos: 18\n\n// Sixpack data\n// Fills the data in dst and returns the number of bytes filled.\n// Returns 0 if not a sixpackable.\n// The dst array must be large enough to hold packed value\nstatic int sixpack(const char *data, int len, char dst[]){\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n for (int i = 0; i < len; i++) {\n int k6v = tosix[bytes[i]];\n if (k6v == 0) {\n return 0;\n }\n if (i%4 == 0) {\n dst[j++] = k6v<<2;\n } else if (i%4 == 1) {\n dst[j-1] |= k6v>>4;\n dst[j++] = k6v<<4;\n } else if (i%4 == 2) {\n dst[j-1] |= k6v>>2;\n dst[j++] = k6v<<6;\n } else {\n dst[j-1] |= k6v;\n }\n }\n return j;\n}\n\n// (Un)sixpack data.\n// Fills the data in dst and returns the len of original data.\n// The data must be sixpacked and len must be > 0.\n// The dst array must be large enough to hold unpacked value\nstatic int unsixpack(const char *data, int len, char dst[]) {\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n int k = 0;\n for (int i = 0; i < len; i++) {\n if (k == 0) {\n dst[j++] = fromsix[bytes[i]>>2];\n k++;\n } else if (k == 1) {\n dst[j++] = fromsix[((bytes[i-1]<<4)|(bytes[i]>>4))&63];\n k++;\n } else {\n dst[j++] = fromsix[((bytes[i-1]<<2)|(bytes[i]>>6))&63];\n dst[j++] = fromsix[bytes[i]&63];\n k = 0;\n }\n }\n if (j > 0 && dst[j-1] == 0) {\n j--;\n }\n return j;\n}\n\n// Safely adds two int64_t values, clamping on overflow.\nstatic int64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n/// https://github.com/tidwall/varint.c\nstatic int varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = 
(uint8_t)x;\n return n;\n}\n\nstatic int varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\n#ifdef HALFSECONDTIME\ntypedef uint32_t etime_t;\n#else\ntypedef int64_t etime_t;\n#endif\n\n\n// Mostly a copy of the pogocache_opts, but used internally\n// See the opts_to_ctx function for translation.\nstruct pgctx {\n void *(*malloc)(size_t);\n void (*free)(void*);\n size_t (*malloc_size)(void*);\n void (*yield)(void *udata);\n void (*evicted)(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata);\n void *udata;\n bool usecas;\n bool nosixpack;\n bool noevict;\n bool allowshrink;\n bool usethreadbatch;\n int nshards;\n double loadfactor;\n double shrinkfactor;\n uint64_t seed;\n};\n\n// The entry structure is a simple allocation with all the fields, being \n// variable in size, slammed together contiguously. There's a one byte header\n// that provides information about what is available in the structure.\n// The format is: (header,time,expires?,flags?,cas?,key,value)\n// The expires, flags, and cas fields are optional. 
The optionality depends on\n// header bit flags.\nstruct entry;\n\n// Returns the sizeof the entry struct, which takes up no space at all.\n// This would be like doing a sizeof(struct entry), if entry had a structure.\nstatic size_t entry_struct_size(void) {\n return 0;\n}\n\n// Returns the data portion of the entry, which is the entire allocation.\nstatic const uint8_t *entry_data(const struct entry *entry) {\n return (uint8_t*)entry;\n}\n\nstatic int64_t entry_expires(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n int64_t x = 0;\n if ((hdr>>0)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\nstatic int64_t entry_time(struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n etime_t etime;\n memcpy(&etime, p+1, sizeof(etime_t));\n#ifdef HALFSECONDTIME\n int64_t time = (int64_t)etime * INT64_C(500000000);\n#else \n int64_t time = etime;\n#endif \n return time;\n}\n\nstatic void entry_settime(struct entry *entry, int64_t time) {\n const uint8_t *p = entry_data(entry);\n#ifdef HALFSECONDTIME\n // Eviction time is stored as half seconds.\n etime_t etime = time / INT64_C(500000000);\n etime = etime > UINT32_MAX ? UINT32_MAX : etime;\n#else\n etime_t etime = time;\n#endif\n memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));\n}\n\nstatic int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,\n int64_t cleartime)\n{\n return etime < cleartime ? POGOCACHE_REASON_CLEARED :\n expires > 0 && expires <= now ? 
POGOCACHE_REASON_EXPIRED :\n 0;\n}\n\nstatic int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {\n int64_t etime = entry_time(entry);\n int64_t expires = entry_expires(entry);\n return entry_alive_exp(expires, etime, now, cleartime);\n}\n\nstatic uint64_t entry_cas(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n uint64_t x = 0;\n if ((hdr>>2)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\n// returns the key. If using sixpack make sure to copy the result asap.\nstatic const char *entry_key(const struct entry *entry, size_t *keylen_out,\n char buf[128])\n{\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n if ((hdr>>3)&1) {\n keylen = unsixpack(key, (int)keylen, buf);\n key = buf;\n }\n *keylen_out = keylen;\n return key;\n}\n\n// returns the raw key. 
sixpack will be returned in it's raw format\nstatic const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n *keylen_out = keylen;\n return key;\n}\n\nstatic bool entry_sixpacked(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p);\n return (hdr>>3)&1;\n}\n\nstatic size_t entry_extract(const struct entry *entry, const char **key,\n size_t *keylen, char buf[128], const char **val, size_t *vallen, \n int64_t *expires, uint32_t *flags, uint64_t *cas,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n if (expires) {\n memcpy(expires, p, 8);\n }\n p += 8; // expires\n } else {\n if (expires) {\n *expires = 0;\n }\n }\n if ((hdr>>1)&1) {\n if (flags) {\n memcpy(flags, p, 4);\n }\n p += 4; // flags\n } else {\n if (flags) {\n *flags = 0;\n }\n }\n if (ctx->usecas) {\n if (cas) {\n memcpy(cas, p, 8);\n }\n p += 8; // cas\n } else {\n if (cas) {\n *cas = 0;\n }\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n if (key) {\n *key = (char*)p;\n *keylen = x;\n if ((hdr>>3)&1) {\n *keylen = unsixpack(*key, (int)*keylen, buf);\n *key = buf;\n }\n }\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n if (val) {\n *val = (char*)p;\n *vallen = x;\n }\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\nstatic size_t entry_memsize(const struct entry *entry,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p 
+= 4; // flags\n }\n if (ctx->usecas) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\n// The 'cas' param should always be set to zero unless loading from disk. \n// Setting to zero will set a new unique cas to the entry.\nstatic struct entry *entry_new(const char *key, size_t keylen, const char *val,\n size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,\n struct pgctx *ctx)\n{\n bool usesixpack = !ctx->nosixpack;\n#ifdef DBGCHECKENTRY\n // printf(\"entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, \"\n // \"expires=%\" PRId64 \", flags=%\" PRId32 \", cas=%\" PRIu64 \", \"\n // \"usesixpack=%d\\n\", (int)keylen, key, keylen, (int)vallen, key, vallen,\n // expires, flags, cas, usesixpack);\n int64_t oexpires = expires;\n uint32_t oflags = flags;\n uint64_t ocas = cas;\n const char *okey = key;\n size_t okeylen = keylen;\n const char *oval = val;\n size_t ovallen = vallen;\n#endif\n uint8_t hdr = 0;\n uint8_t keylenbuf[10];\n uint8_t vallenbuf[10];\n int nexplen, nflagslen, ncaslen, nkeylen, nvallen;\n if (expires > 0) {\n hdr |= 1;\n nexplen = 8;\n } else {\n nexplen = 0;\n }\n if (flags > 0) {\n hdr |= 2;\n nflagslen = 4;\n } else {\n nflagslen = 0;\n }\n if (ctx->usecas) {\n hdr |= 4;\n ncaslen = 8;\n } else {\n ncaslen = 0;\n }\n char buf[128];\n if (usesixpack && keylen <= 128) {\n size_t len = sixpack(key, keylen, buf);\n if (len > 0) {\n hdr |= 8;\n keylen = len;\n key = buf;\n }\n }\n nkeylen = varint_write_u64(keylenbuf, keylen);\n nvallen = varint_write_u64(vallenbuf, vallen);\n struct entry *entry_out = 0;\n size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+\n ncaslen+nkeylen+keylen+nvallen+vallen;\n // printf(\"malloc=%p size=%zu, ctx=%p\\n\", ctx->malloc, size, ctx);\n void *mem = ctx->malloc(size);\n struct entry *entry = mem;\n if (!entry) {\n return 
0;\n }\n uint8_t *p = (void*)entry_data(entry);\n *(p++) = hdr;\n memset(p, 0, sizeof(etime_t));\n p += sizeof(etime_t); // time\n if (nexplen > 0) {\n memcpy(p, &expires, nexplen);\n p += nexplen;\n }\n if (nflagslen > 0) {\n memcpy(p, &flags, nflagslen);\n p += nflagslen;\n }\n if (ncaslen > 0) {\n memcpy(p, &cas, ncaslen);\n p += ncaslen;\n }\n memcpy(p, keylenbuf, nkeylen);\n p += nkeylen;\n memcpy(p, key, keylen);\n p += keylen;\n memcpy(p, vallenbuf, nvallen);\n p += nvallen;\n memcpy(p, val, vallen);\n p += vallen;\n entry_out = entry;\n#ifdef DBGCHECKENTRY\n // check the key\n const char *key2, *val2;\n size_t keylen2, vallen2;\n int64_t expires2;\n uint32_t flags2;\n uint64_t cas2;\n char buf1[256];\n entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,\n &flags2, &cas2, ctx);\n assert(expires2 == oexpires);\n assert(flags2 == oflags);\n assert(cas2 == ocas);\n assert(keylen2 == okeylen);\n assert(memcmp(key2, okey, okeylen) == 0);\n assert(vallen2 == ovallen);\n assert(memcmp(val2, oval, ovallen) == 0);\n#endif\n return entry_out;\n}\n\nstatic void entry_free(struct entry *entry, struct pgctx *ctx) {\n ctx->free(entry);\n}\n\nstatic int entry_compare(const struct entry *a, const struct entry *b) {\n size_t akeylen, bkeylen;\n char buf1[256], buf2[256];\n const char *akey;\n const char *bkey;\n if (entry_sixpacked(a) == entry_sixpacked(b)) {\n akey = entry_rawkey(a, &akeylen);\n bkey = entry_rawkey(b, &bkeylen);\n } else {\n akey = entry_key(a, &akeylen, buf1);\n bkey = entry_key(b, &bkeylen, buf2);\n }\n size_t size = akeylen < bkeylen ? akeylen : bkeylen;\n int cmp = memcmp(akey, bkey, size);\n if (cmp == 0) {\n cmp = akeylen < bkeylen ? 
-1 : akeylen > bkeylen;\n }\n return cmp;\n}\n\n#ifndef HASHSIZE\n#define HASHSIZE 3\n#endif\n#if HASHSIZE < 1 || HASHSIZE > 4\n#error bad hash size\n#endif\n\nstruct bucket {\n uint8_t entry[PTRSIZE]; // 48-bit pointer\n uint8_t hash[HASHSIZE]; // 24-bit hash\n uint8_t dib; // distance to bucket\n};\n\nstatic_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, \"bad bucket size\");\n\nstruct map {\n int cap; // initial capacity\n int nbuckets; // number of buckets\n int count; // current entry count\n int mask; // bit mask for \n int growat;\n int shrinkat;\n struct bucket *buckets;\n uint64_t total; // current entry count\n size_t entsize; // memory size of all entries\n \n};\n\nstruct shard {\n atomic_uintptr_t lock; // spinlock (batch pointer)\n uint64_t cas; // compare and store value\n int64_t cleartime; // last clear time\n int clearcount; // number of items cleared\n struct map map; // robinhood hashmap\n // for batch linked list only\n struct shard *next;\n};\n\nstatic void lock_init(struct shard *shard) {\n atomic_init(&shard->lock, 0);\n}\n\nstruct batch {\n struct pogocache *cache; // associated cache.\n struct shard *shard; // first locked shard\n int64_t time; // timestamp\n};\n\nstruct pogocache {\n bool isbatch; \n union {\n struct pgctx ctx;\n struct batch batch;\n };\n struct shard shards[];\n};\n\nstatic struct entry *get_entry(struct bucket *bucket) {\n return load_ptr(bucket->entry);\n}\n\nstatic void set_entry(struct bucket *bucket, struct entry *entry) {\n store_ptr(bucket->entry, entry);\n}\n\n#if HASHSIZE == 1\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFF;\n}\nstatic void write_hash(uint8_t data[1], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[1]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n return hash;\n}\n#elif HASHSIZE == 2\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFF;\n}\nstatic void write_hash(uint8_t data[2], uint32_t hash) {\n data[0] = 
(hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[2]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n return hash;\n}\n#elif HASHSIZE == 3\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFFFF;\n}\nstatic void write_hash(uint8_t data[3], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[3]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n return hash;\n}\n#else \nstatic uint32_t clip_hash(uint32_t hash) {\n return hash;\n}\nstatic void write_hash(uint8_t data[4], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n data[3] = (hash>>24)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[4]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n hash |= ((uint64_t)data[3])<<24;\n return hash;\n}\n#endif\n\nstatic uint32_t get_hash(struct bucket *bucket) {\n return read_hash(bucket->hash);\n}\n\nstatic void set_hash(struct bucket *bucket, uint32_t hash) {\n write_hash(bucket->hash, hash);\n}\n\nstatic uint8_t get_dib(struct bucket *bucket) {\n return bucket->dib;\n}\n\nstatic void set_dib(struct bucket *bucket, uint8_t dib) {\n bucket->dib = dib;\n}\n\nstatic bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {\n map->cap = cap;\n map->nbuckets = cap;\n map->count = 0;\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * ctx->loadfactor;\n map->shrinkat = map->nbuckets * ctx->shrinkfactor;\n size_t size = sizeof(struct bucket)*map->nbuckets;\n map->buckets = ctx->malloc(size);\n if (!map->buckets) {\n // nomem\n memset(map, 0, sizeof(struct map));\n return false;\n }\n memset(map->buckets, 0, size);\n return true;\n}\n\nstatic bool resize(struct map *map, 
size_t new_cap, struct pgctx *ctx) {\n struct map map2;\n if (!map_init(&map2, new_cap, ctx)) {\n return false;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket ebkt = map->buckets[i];\n if (get_dib(&ebkt)) {\n set_dib(&ebkt, 1);\n size_t j = get_hash(&ebkt) & map2.mask;\n while (1) {\n if (get_dib(&map2.buckets[j]) == 0) {\n map2.buckets[j] = ebkt;\n break;\n }\n if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {\n struct bucket tmp = map2.buckets[j];\n map2.buckets[j] = ebkt;\n ebkt = tmp;\n }\n j = (j + 1) & map2.mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n }\n }\n int org_cap = map->cap;\n int org_count = map->count;\n ctx->free(map->buckets);\n memcpy(map, &map2, sizeof(struct map));\n map->cap = org_cap;\n map->count = org_count;\n return true;\n}\n\nstatic bool map_insert(struct map *map, struct entry *entry, uint32_t hash,\n struct entry **old, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*2, ctx)) {\n *old = 0;\n return false;\n }\n }\n map->entsize += entry_memsize(entry, ctx);\n struct bucket ebkt;\n set_entry(&ebkt, entry);\n set_hash(&ebkt, hash);\n set_dib(&ebkt, 1);\n size_t i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n // new entry\n map->buckets[i] = ebkt;\n map->count++;\n map->total++;\n *old = 0;\n return true;\n }\n if (get_hash(&ebkt) == get_hash(&map->buckets[i]) && \n entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)\n {\n // replaced\n *old = get_entry(&map->buckets[i]);\n map->entsize -= entry_memsize(*old, ctx);\n set_entry(&map->buckets[i], get_entry(&ebkt));\n return true;\n }\n if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {\n struct bucket tmp = map->buckets[i];\n map->buckets[i] = ebkt;\n ebkt = tmp;\n }\n i = (i + 1) & map->mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n}\n\nstatic bool bucket_eq(struct map *map, size_t i, const char *key,\n size_t keylen, uint32_t hash)\n{\n if 
(get_hash(&map->buckets[i]) != hash) {\n return false;\n }\n size_t keylen2;\n char buf[128];\n const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);\n return keylen == keylen2 && memcmp(key, key2, keylen) == 0;\n}\n\n// Returns the bucket index for key, or -1 if not found.\nstatic int map_get_bucket(struct map *map, const char *key, size_t keylen,\n uint32_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while (1) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n return -1;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return i;\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic struct entry *map_get_entry(struct map *map, const char *key,\n size_t keylen, uint32_t hash, int *bkt_idx_out)\n{\n int i = map_get_bucket(map, key, keylen, hash);\n *bkt_idx_out = i;\n return i >= 0 ? get_entry(&map->buckets[i]) : 0;\n}\n\n// This deletes entry from bucket and adjusts the dibs buckets to right, if\n// needed.\nstatic void delbkt(struct map *map, size_t i) {\n set_dib(&map->buckets[i], 0);\n while (1) {\n size_t h = i;\n i = (i + 1) & map->mask;\n if (get_dib(&map->buckets[i]) <= 1) {\n set_dib(&map->buckets[h], 0);\n break;\n }\n map->buckets[h] = map->buckets[i];\n set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);\n }\n map->count--;\n}\n\nstatic bool needsshrink(struct map *map, struct pgctx *ctx) {\n return ctx->allowshrink && map->nbuckets > map->cap && \n map->count <= map->shrinkat;\n}\n\n// Try to shrink the hashmap. 
If needed, this will allocate a new hashmap that\n// has fewer buckets and move all existing entries into the smaller map.\n// The 'multi' param is a hint that multi entries may have been deleted, such\n// as with the iter or clear operations.\n// If the resize fails due to an allocation error then the existing hashmap\n// will be retained.\nstatic void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {\n if (!needsshrink(map, ctx)) {\n return;\n }\n int cap;\n if (multi) {\n // Determine how many buckets are needed to store all entries.\n cap = map->cap;\n int growat = cap * ctx->loadfactor;\n while (map->count >= growat) {\n cap *= 2;\n growat = cap * ctx->loadfactor;\n }\n } else {\n // Just half the buckets\n cap = map->nbuckets / 2;\n }\n resize(map, cap, ctx);\n}\n\n// delete an entry at bucket position. not called directly\nstatic struct entry *delentry_at_bkt(struct map *map, size_t i, \n struct pgctx *ctx)\n{\n struct entry *old = get_entry(&map->buckets[i]);\n assert(old);\n map->entsize -= entry_memsize(old, ctx);\n delbkt(map, i);\n return old;\n}\n\nstatic struct entry *map_delete(struct map *map, const char *key,\n size_t keylen, uint32_t hash, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n int i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n return 0;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return delentry_at_bkt(map, i, ctx);\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic size_t evict_entry(struct shard *shard, int shardidx, \n struct entry *entry, int64_t now, int reason, struct pgctx *ctx)\n{\n char buf[128];\n size_t keylen;\n const char *key = entry_key(entry, &keylen, buf);\n uint32_t hash = th64(key, keylen, ctx->seed);\n struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(del == entry); (void)del;\n if (ctx->evicted) {\n // Notify user that an entry was evicted.\n const char *val;\n size_t vallen;\n int64_t expires = 0;\n uint32_t flags = 0;\n uint64_t cas = 0;\n 
entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val,\n vallen, expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n size_t size = entry_memsize(entry, ctx);\n entry_free(entry, ctx);\n return size;\n}\n\n// evict an entry using the 2-random algorithm.\n// Pick two random entries and delete the one with the oldest access time.\n// Do not evict the entry if it matches the provided hash.\nstatic void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,\n int64_t now, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n struct map *map = &shard->map;\n struct entry *entries[2];\n int count = 0;\n for (int i = 1; i < map->nbuckets && count < 2; i++) {\n size_t j = (i+hash)&(map->nbuckets-1);\n struct bucket *bkt = &map->buckets[j];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry has expired. 
Evict this one instead.\n evict_entry(shard, shardidx, entry, now, reason, ctx);\n return;\n }\n if (get_hash(bkt) == hash) {\n continue;\n }\n entries[count++] = entry;\n }\n int choose;\n if (count == 1) {\n choose = 0;\n } else if (count == 2) {\n // We now have two candidates.\n if (entry_time(entries[0]) < entry_time(entries[1])) {\n choose = 0;\n } else {\n choose = 1;\n }\n } else {\n return;\n }\n evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,\n ctx);\n}\n\nstatic void shard_deinit(struct shard *shard, struct pgctx *ctx) {\n struct map *map = &shard->map;\n if (!map->buckets) {\n return;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n entry_free(entry, ctx);\n }\n ctx->free(map->buckets);\n}\n\nstatic bool shard_init(struct shard *shard, struct pgctx *ctx) {\n memset(shard, 0, sizeof(struct shard));\n lock_init(shard);\n shard->cas = 1;\n if (!map_init(&shard->map, INITCAP, ctx)) {\n // nomem\n shard_deinit(shard, ctx);\n return false;\n }\n return true;\n}\n\n/// Free all cache and shard hashmap allocations.\n/// This does not access the value data in any of the entries. If it is needed\n/// for the further cleanup at an entry value level, then use the\n/// pogocache_iter to perform the cleanup on each entry before calling this\n/// operation.\n/// Also this is not threadsafe. 
Make sure that other threads are not\n/// currently using the cache concurrently nor after this function is called.\nvoid pogocache_free(struct pogocache *cache) {\n if (!cache) {\n return;\n }\n struct pgctx *ctx = &cache->ctx;\n for (int i = 0; i < cache->ctx.nshards; i++) {\n shard_deinit(&cache->shards[i], ctx);\n }\n cache->ctx.free(cache);\n}\n\nstatic void opts_to_ctx(int nshards, struct pogocache_opts *opts,\n struct pgctx *ctx)\n{\n ctx->nshards = nshards;\n int loadfactor = 0;\n if (opts) {\n ctx->yield = opts->yield;\n ctx->evicted = opts->evicted;\n ctx->udata = opts->udata;\n ctx->usecas = opts->usecas;\n ctx->nosixpack = opts->nosixpack;\n ctx->noevict = opts->noevict;\n ctx->seed = opts->seed;\n loadfactor = opts->loadfactor;\n ctx->allowshrink = opts->allowshrink;\n ctx->usethreadbatch = opts->usethreadbatch;\n }\n // make loadfactor a floating point\n loadfactor = loadfactor == 0 ? DEFLOADFACTOR :\n loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :\n loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :\n loadfactor;\n ctx->loadfactor = ((double)loadfactor/100.0);\n ctx->shrinkfactor = ((double)SHRINKAT/100.0);\n}\n\nstatic struct pogocache_opts newdefopts = { 0 };\n\n/// Returns a new cache or null if there is not enough memory available.\n/// See 'pogocache_opts' for all options.\nstruct pogocache *pogocache_new(struct pogocache_opts *opts) {\n if (!opts) {\n opts = &newdefopts;\n }\n void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;\n void (*_free)(void*) = opts->free ? opts->free : free;\n int shards = !opts || opts->nshards <= 0 ? 
DEFSHARDS : opts->nshards;\n size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);\n struct pogocache *cache = _malloc(size);\n if (!cache) {\n return 0;\n }\n memset(cache, 0, sizeof(struct pogocache));\n struct pgctx *ctx = &cache->ctx;\n opts_to_ctx(shards, opts, ctx);\n ctx->malloc = _malloc;\n ctx->free = _free;\n for (int i = 0; i < ctx->nshards; i++) {\n if (!shard_init(&cache->shards[i], ctx)) {\n // nomem\n pogocache_free(cache);\n return 0;\n }\n }\n return cache;\n}\n\nstatic int shard_index(struct pogocache *cache, uint64_t hash) {\n return (hash>>32)%cache->ctx.nshards;\n}\n\nstatic struct shard *shard_get(struct pogocache *cache, int index) {\n return &cache->shards[index];\n}\n\n/// Returns a timestamp.\nint64_t pogocache_now(void) {\n return getnow();\n}\n\nstatic __thread struct pogocache thbatch;\n\nstruct pogocache *pogocache_begin(struct pogocache *cache) {\n struct pogocache *batch;\n if (cache->ctx.usethreadbatch) {\n batch = &thbatch;\n } else {\n batch = cache->ctx.malloc(sizeof(struct pogocache));\n if (!batch) {\n return 0;\n }\n }\n batch->isbatch = true;\n batch->batch.cache = cache;\n batch->batch.shard = 0;\n batch->batch.time = 0;\n return batch;\n}\n\nvoid pogocache_end(struct pogocache *batch) {\n assert(batch->isbatch);\n struct shard *shard = batch->batch.shard;\n while (shard) {\n struct shard *next = shard->next;\n shard->next = 0;\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);\n shard = next;\n }\n if (!batch->batch.cache->ctx.usethreadbatch) {\n batch->batch.cache->ctx.free(batch);\n }\n}\n\nstatic void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {\n if (batch) {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n shard->next = batch->shard;\n batch->shard = shard;\n break;\n }\n if (val == (uintptr_t)(void*)batch) {\n break;\n }\n if (ctx->yield) {\n 
ctx->yield(ctx->udata);\n }\n }\n } else {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n break;\n }\n if (ctx->yield) {\n ctx->yield(ctx->udata);\n }\n }\n }\n}\n\nstatic bool acquire_for_scan(int shardidx, struct shard **shard_out, \n struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *shard_out = shard;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// acquire a lock for the key\nstatic bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,\n struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n uint64_t fhash = th64(key, keylen, cache->ctx.seed);\n int shardidx = shard_index(cache, fhash);\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *hash_out = fhash;\n *shard_out = shard;\n *shardidx_out = shardidx;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// Acquire a lock on the shard for key and execute the provided operation.\n#define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \\\n int shardidx; \\\n uint32_t hash; \\\n struct shard *shard; \\\n bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \\\n &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)shardidx, (void)hash, (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\n// Acquire a lock on the shard at index and execute the 
provided operation.\n#define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \\\n struct shard *shard; \\\n bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\nstatic int loadop(const void *key, size_t keylen, \n struct pogocache_load_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defloadopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n // Get the entry bucket index for the entry with key.\n int bidx = map_get_bucket(&shard->map, key, keylen, hash);\n if (bidx == -1) {\n return POGOCACHE_NOTFOUND;\n }\n // Extract the bucket, entry, and values.\n struct bucket *bkt = &shard->map.buckets[bidx];\n struct entry *entry = get_entry(bkt);\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. 
Evict the entry and clear the bucket.\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(entry, ctx);\n delbkt(&shard->map, bidx);\n return POGOCACHE_NOTFOUND;\n }\n if (!opts->notouch) {\n entry_settime(entry, now);\n }\n if (opts->entry) {\n struct pogocache_update *update = 0;\n opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,\n cas, &update, opts->udata);\n if (update) {\n // User wants to update the entry.\n shard->cas++;\n struct entry *entry2 = entry_new(key, keylen, update->value,\n update->valuelen, update->expires, update->flags, shard->cas, \n ctx);\n if (!entry2) {\n return POGOCACHE_NOMEM;\n }\n entry_settime(entry2, now);\n set_entry(bkt, entry2);\n entry_free(entry, ctx);\n }\n }\n return POGOCACHE_FOUND;\n}\n\n/// Loads an entry from the cache.\n/// Use the pogocache_load_opts.entry callback to access the value of the entry.\n/// It's possible to update the value using the 'update' param in the callback.\n/// See 'pogocache_load_opts' for all options.\n/// @returns POGOCACHE_FOUND when the entry was found.\n/// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\nint pogocache_load(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_load_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen, \n loadop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int deleteop(const void *key, size_t keylen, \n struct pogocache_delete_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defdeleteopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);\n if (!entry) {\n // Entry does not exist\n return POGOCACHE_NOTFOUND;\n }\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. It was already deleted from the map but\n // we still need to notify the user.\n if (ctx->evicted) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (opts->entry) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen,\n expires, flags, cas, opts->udata))\n {\n // User canceled the delete. 
Put it back into the map.\n // This insert will not cause an allocation error because the \n // previous delete operation left us with at least one available\n // bucket.\n struct entry *old;\n bool ok = map_insert(&shard->map, entry, hash, &old, ctx);\n assert(ok); (void)ok;\n assert(!old);\n return POGOCACHE_CANCELED;\n }\n }\n // Entry was successfully deleted.\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_DELETED;\n}\n\n/// Deletes an entry from the cache.\n/// See 'pogocache_delete_opts' for all options.\n/// @returns POGOCACHE_DELETED when the entry was successfully deleted.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\n/// @returns POGOCACHE_CANCELED when opts.entry callback returned false.\nint pogocache_delete(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_delete_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n deleteop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int storeop(const void *key, size_t keylen, const void *val,\n size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,\n int shardidx, uint32_t hash, struct pgctx *ctx)\n{\n int count = shard->map.count;\n opts = opts ? opts : &defstoreopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int64_t expires = 0;\n if (opts->expires > 0) {\n expires = opts->expires;\n } else if (opts->ttl > 0) {\n expires = int64_add_clamp(now, opts->ttl);\n }\n if (opts->keepttl) {\n // User wants to keep the existing ttl. 
Get the existing entry from the\n // map first and take its expiration.\n int i;\n struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason == 0) {\n expires = entry_expires(old);\n }\n }\n }\n shard->cas++;\n struct entry *entry = entry_new(key, keylen, val, vallen, expires,\n opts->flags, shard->cas, ctx);\n if (!entry) {\n goto nomem;\n }\n entry_settime(entry, now);\n if (opts->lowmem && ctx->noevict) {\n goto nomem;\n }\n // Insert new entry into map\n struct entry *old;\n if (!map_insert(&shard->map, entry, hash, &old, ctx)) {\n goto nomem;\n }\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason) {\n // There's an old entry, but it's no longer alive.\n // Treat this like an eviction and notify the user.\n if (ctx->evicted) {\n const char *oval;\n size_t ovallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0,\n &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,\n oexpires, oflags, ocas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(old, ctx);\n old = 0;\n }\n }\n int put_back_status = 0;\n if (old) {\n if (opts->casop) {\n // User is requesting the cas operation.\n if (ctx->usecas) {\n uint64_t old_cas = entry_cas(old);\n if (opts->cas != old_cas) {\n // CAS test failed.\n // printf(\". cas failed: expected %\" PRIu64 \", \"\n // \"got %\" PRIu64 \"\\n\", cas, old_cas);\n put_back_status = POGOCACHE_FOUND;\n }\n } else {\n put_back_status = POGOCACHE_FOUND;\n }\n } else if (opts->nx) {\n put_back_status = POGOCACHE_FOUND;\n }\n if (put_back_status) {\n put_back:;\n // The entry needs be put back into the map and operation must\n // return early.\n // This insert operation must not fail since the entry 'e' and\n // 'old' both exist and will always be bucket swapped. 
There will\n // never be a new allocation.\n struct entry *e = 0;\n bool ok = map_insert(&shard->map, old, hash, &e, ctx);\n assert(ok); (void)ok;\n assert(e == entry);\n entry_free(entry, ctx);\n return put_back_status;\n }\n } else if (opts->xx || opts->casop) {\n // The new entry must not be inserted.\n // Delete it and return early.\n struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(e == entry); (void)e;\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (old && opts->entry) {\n // User is requesting to verify the old entry before allowing it to be\n // replaced by the new entry.\n const char *val;\n size_t vallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,\n oflags, ocas, opts->udata))\n {\n // User wants to keep the old entry.\n put_back_status = POGOCACHE_CANCELED;\n goto put_back;\n }\n }\n // The new entry was inserted.\n if (old) {\n entry_free(old, ctx);\n return POGOCACHE_REPLACED;\n } else {\n if (opts->lowmem && shard->map.count > count) {\n // The map grew by one bucket, yet the user indicates that there is\n // a low memory event. Evict one entry.\n auto_evict_entry(shard, shardidx, hash, now, ctx);\n }\n return POGOCACHE_INSERTED;\n }\nnomem:\n entry_free(entry, ctx);\n return POGOCACHE_NOMEM;\n}\n\n/// Insert or replace an entry in the cache.\n/// If an entry with the same key already exists then the cache then the \n/// the opts.entry callback can be used to check the existing\n/// value first, allowing the operation to be canceled.\n/// See 'pogocache_store_opts' for all options.\n/// @returns POGOCACHE_INSERTED when the entry was inserted.\n/// @returns POGOCACHE_REPLACED when the entry replaced an existing one.\n/// @returns POGOCACHE_FOUND when the entry already exists. 
(cas/nx)\n/// @returns POGOCACHE_CANCELED when the operation was canceled.\n/// @returns POGOCACHE_NOMEM when there is system memory available.\nint pogocache_store(struct pogocache *cache, const void *key, size_t keylen, \n const void *val, size_t vallen, struct pogocache_store_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)\n );\n}\n\n\nstatic struct pogocache *rootcache(struct pogocache *cache) {\n return cache->isbatch ? cache->batch.cache : cache;\n}\n\n/// Returns the number of shards in cache\nint pogocache_nshards(struct pogocache *cache) {\n cache = rootcache(cache);\n return cache->ctx.nshards;\n}\n\nstatic int iterop(struct shard *shard, int shardidx, int64_t now,\n struct pogocache_iter_opts *opts, struct pgctx *ctx)\n{\n char buf[128];\n int status = POGOCACHE_FINISHED;\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen,\n &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n#ifdef EVICTONITER\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n // Delete entry at bucket.\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n i--;\n#endif\n } else {\n // Entry is alive, check with user for next action.\n int action = POGOCACHE_ITER_CONTINUE;\n if (opts->entry) {\n action = opts->entry(shardidx, now, key, keylen, val,\n vallen, expires, flags, cas, opts->udata);\n }\n if (action != POGOCACHE_ITER_CONTINUE) {\n if (action&POGOCACHE_ITER_DELETE) {\n // Delete entry at bucket\n delbkt(&shard->map, 
i);\n entry_free(entry, ctx);\n i--;\n }\n if (action&POGOCACHE_ITER_STOP) {\n status = POGOCACHE_CANCELED;\n break;\n }\n }\n }\n }\n tryshrink(&shard->map, true, ctx);\n return status;\n}\n\n/// Iterate over entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The pogocache_iter_opts.entry callback can be used to perform actions such\n/// as: deleting entries and stopping iteration early. \n/// See 'pogocache_iter_opts' for all options.\n/// @return POGOCACHE_FINISHED if iteration completed\n/// @return POGOCACHE_CANCELED if iteration stopped early\nint pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defiteropts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return POGOCACHE_FINISHED;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)\n );\n }\n for (int i = 0; i < nshards; i++) {\n int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n iterop(shard, i, now, opts, &cache->ctx)\n );\n if (status != POGOCACHE_FINISHED) {\n return status;\n }\n }\n return POGOCACHE_FINISHED;\n}\n\nstatic size_t countop(struct shard *shard) {\n return shard->map.count - shard->clearcount;\n}\n\n/// Returns the number of entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_count(struct pogocache *cache,\n struct pogocache_count_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defcountopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n countop(shard);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n countop(shard);\n );\n }\n return count;\n}\n\nstatic uint64_t totalop(struct shard *shard) {\n return shard->map.total;\n}\n\n/// Returns the total number of entries that have ever been stored in the cache.\n/// For the current number of entries use pogocache_count().\n/// There's an option to allow for isolating the operation to a single shard.\nuint64_t pogocache_total(struct pogocache *cache,\n struct pogocache_total_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &deftotalopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,\n totalop(shard);\n );\n }\n uint64_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,\n totalop(shard);\n );\n }\n return count;\n}\n\nstatic size_t sizeop(struct shard *shard, bool entriesonly) {\n size_t size = 0;\n if (!entriesonly) {\n size += sizeof(struct shard);\n size += sizeof(struct bucket)*shard->map.nbuckets;\n }\n size += shard->map.entsize;\n return size;\n}\n\n/// Returns the total memory size of the shard.\n/// This includes the memory size of all data structures and entries.\n/// Use the entriesonly option to limit the result to only the entries.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_size(struct pogocache *cache,\n struct pogocache_size_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defsizeopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n sizeop(shard, opts->entriesonly);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n sizeop(shard, opts->entriesonly);\n );\n }\n return count;\n}\n\n\n\nstatic int sweepop(struct shard *shard, int shardidx, int64_t now,\n size_t *swept, size_t *kept, struct pgctx *ctx)\n{\n char buf[128];\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int64_t expires = entry_expires(entry);\n int64_t etime = entry_time(entry);\n int reason = entry_alive_exp(expires, etime, now, shard->cleartime);\n if (reason == 0) {\n // entry is still alive\n (*kept)++;\n continue;\n }\n // entry is no longer alive.\n if (ctx->evicted) {\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,\n &flags, &cas, ctx);\n // Report eviction to user\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n (*swept)++;\n // Entry was deleted from bucket, which may move entries to the right\n // over one bucket to the left. 
So we need to check the same bucket\n // again.\n i--;\n }\n tryshrink(&shard->map, true, ctx);\n return 0;\n}\n\n/// Remove expired entries from the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The final 'kept' or 'swept' counts are returned.\n/// @return POGOCACHE_FINISHED when iteration completed\n/// @return POGOCACHE_CANCELED when iteration stopped early\nvoid pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept, \n struct pogocache_sweep_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweepopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n size_t sweptc = 0;\n size_t keptc = 0;\n if (opts->oneshard) {\n if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,\n &cache->ctx);\n );\n }\n } else {\n for (int i = 0; i < nshards; i++) {\n size_t sweptc2 = 0;\n size_t keptc2 = 0;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);\n );\n sweptc += sweptc2;\n keptc += keptc2;\n }\n }\n if (swept) {\n *swept = sweptc;\n }\n if (kept) {\n *kept = keptc;\n }\n}\n\nstatic int clearop(struct shard *shard, int shardidx, int64_t now, \n struct pgctx *ctx)\n{\n (void)shardidx, (void)ctx;\n shard->cleartime = now;\n shard->clearcount += (shard->map.count-shard->clearcount);\n return 0;\n}\n\n/// Clear the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nvoid pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defclearopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return;\n }\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n clearop(shard, opts->oneshardidx, now, &cache->ctx);\n );\n return;\n }\n for (int i = 0; i < cache->ctx.nshards; i++) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n clearop(shard, i, now, &cache->ctx);\n );\n }\n}\n\nstatic int sweeppollop(struct shard *shard, int shardidx, int64_t now, \n int pollsize, double *percent)\n{\n // start at random bucket\n int count = 0;\n int dead = 0;\n int bidx = mix13(now+shardidx)%shard->map.nbuckets;\n for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {\n struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n count++;\n dead += (entry_alive(entry, now, shard->cleartime) != 0);\n }\n if (count == 0) {\n *percent = 0;\n return 0;\n }\n *percent = (double)dead/(double)count;\n return 0;\n}\n\ndouble pogocache_sweep_poll(struct pogocache *cache, \n struct pogocache_sweep_poll_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweeppollopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int pollsize = opts->pollsize == 0 ? 
20 : opts->pollsize;\n \n // choose a random shard\n int shardidx = mix13(now)%nshards;\n double percent;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,\n sweeppollop(shard, shardidx, now, pollsize, &percent);\n );\n return percent;\n}\n"], ["/pogocache/src/lz4.c", "/*\n LZ4 - Fast LZ compression algorithm\n Copyright (C) 2011-2023, Yann Collet.\n\n BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following disclaimer\n in the documentation and/or other materials provided with the\n distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n You can contact the author at :\n - LZ4 homepage : http://www.lz4.org\n - LZ4 source repository : https://github.com/lz4/lz4\n*/\n\n/*-************************************\n* Tuning parameters\n**************************************/\n/*\n * LZ4_HEAPMODE :\n * Select how stateless compression functions like `LZ4_compress_default()`\n * allocate memory for their hash table,\n * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).\n */\n#ifndef LZ4_HEAPMODE\n# define LZ4_HEAPMODE 0\n#endif\n\n/*\n * LZ4_ACCELERATION_DEFAULT :\n * Select \"acceleration\" for LZ4_compress_fast() when parameter value <= 0\n */\n#define LZ4_ACCELERATION_DEFAULT 1\n/*\n * LZ4_ACCELERATION_MAX :\n * Any \"acceleration\" value higher than this threshold\n * get treated as LZ4_ACCELERATION_MAX instead (fix #876)\n */\n#define LZ4_ACCELERATION_MAX 65537\n\n\n/*-************************************\n* CPU Feature Detection\n**************************************/\n/* LZ4_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. 
It depends on compiler extension (ie, not portable).\n * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n * It can generate buggy code on targets which assembly generation depends on alignment.\n * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */\n# if defined(__GNUC__) && \\\n ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \\\n || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n# define LZ4_FORCE_MEMORY_ACCESS 2\n# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)\n# define LZ4_FORCE_MEMORY_ACCESS 1\n# endif\n#endif\n\n/*\n * LZ4_FORCE_SW_BITCOUNT\n * Define this parameter if your target system or compiler does not support hardware bit count\n */\n#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */\n# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */\n# define LZ4_FORCE_SW_BITCOUNT\n#endif\n\n\n\n/*-************************************\n* Dependency\n**************************************/\n/*\n * LZ4_SRC_INCLUDED:\n * Amalgamation flag, whether lz4.c is included\n */\n#ifndef LZ4_SRC_INCLUDED\n# define LZ4_SRC_INCLUDED 1\n#endif\n\n#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS\n# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */\n#endif\n\n#ifndef LZ4_STATIC_LINKING_ONLY\n# define LZ4_STATIC_LINKING_ONLY\n#endif\n#include \"lz4.h\"\n/* see also \"memory routines\" below */\n\n\n/*-************************************\n* Compiler 
Options\n**************************************/\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */\n# include /* only present in VS2005+ */\n# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */\n# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */\n# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */\n#endif /* _MSC_VER */\n\n#ifndef LZ4_FORCE_INLINE\n# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */\n# define LZ4_FORCE_INLINE static __forceinline\n# else\n# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */\n# if defined (__GNUC__) || defined (__clang__)\n# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))\n# else\n# define LZ4_FORCE_INLINE static inline\n# endif\n# else\n# define LZ4_FORCE_INLINE static\n# endif /* __STDC_VERSION__ */\n# endif /* _MSC_VER */\n#endif /* LZ4_FORCE_INLINE */\n\n/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE\n * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,\n * together with a simple 8-byte copy loop as a fall-back path.\n * However, this optimization hurts the decompression speed by >30%,\n * because the execution does not go to the optimized loop\n * for typical compressible data, and all of the preamble checks\n * before going to the fall-back path become useless overhead.\n * This optimization happens only with the -O3 flag, and -O2 generates\n * a simple 8-byte copy loop.\n * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8\n * functions are annotated with __attribute__((optimize(\"O2\"))),\n * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute\n * of LZ4_wildCopy8 
does not affect the compression speed.\n */\n#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)\n# define LZ4_FORCE_O2 __attribute__((optimize(\"O2\")))\n# undef LZ4_FORCE_INLINE\n# define LZ4_FORCE_INLINE static __inline __attribute__((optimize(\"O2\"),always_inline))\n#else\n# define LZ4_FORCE_O2\n#endif\n\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)\n# define expect(expr,value) (__builtin_expect ((expr),(value)) )\n#else\n# define expect(expr,value) (expr)\n#endif\n\n#ifndef likely\n#define likely(expr) expect((expr) != 0, 1)\n#endif\n#ifndef unlikely\n#define unlikely(expr) expect((expr) != 0, 0)\n#endif\n\n/* Should the alignment test prove unreliable, for some reason,\n * it can be disabled by setting LZ4_ALIGN_TEST to 0 */\n#ifndef LZ4_ALIGN_TEST /* can be externally provided */\n# define LZ4_ALIGN_TEST 1\n#endif\n\n\n/*-************************************\n* Memory routines\n**************************************/\n\n/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :\n * Disable relatively high-level LZ4/HC functions that use dynamic memory\n * allocation functions (malloc(), calloc(), free()).\n *\n * Note that this is a compile-time switch. 
And since it disables\n * public/stable LZ4 v1 API functions, we don't recommend using this\n * symbol to generate a library for distribution.\n *\n * The following public functions are removed when this symbol is defined.\n * - lz4 : LZ4_createStream, LZ4_freeStream,\n * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)\n * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,\n * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)\n * - lz4frame, lz4file : All LZ4F_* functions\n */\n#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\n# define ALLOC(s) lz4_error_memory_allocation_is_disabled\n# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled\n# define FREEMEM(p) lz4_error_memory_allocation_is_disabled\n#elif defined(LZ4_USER_MEMORY_FUNCTIONS)\n/* memory management functions can be customized by user project.\n * Below functions must exist somewhere in the Project\n * and be available at link time */\nvoid* LZ4_malloc(size_t s);\nvoid* LZ4_calloc(size_t n, size_t s);\nvoid LZ4_free(void* p);\n# define ALLOC(s) LZ4_malloc(s)\n# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)\n# define FREEMEM(p) LZ4_free(p)\n#else\n# include /* malloc, calloc, free */\n# define ALLOC(s) malloc(s)\n# define ALLOC_AND_ZERO(s) calloc(1,s)\n# define FREEMEM(p) free(p)\n#endif\n\n#if ! 
LZ4_FREESTANDING\n# include /* memset, memcpy */\n#endif\n#if !defined(LZ4_memset)\n# define LZ4_memset(p,v,s) memset((p),(v),(s))\n#endif\n#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))\n\n\n/*-************************************\n* Common Constants\n**************************************/\n#define MINMATCH 4\n\n#define WILDCOPYLENGTH 8\n#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */\n#define FASTLOOP_SAFE_DISTANCE 64\nstatic const int LZ4_minLength = (MFLIMIT+1);\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define LZ4_DISTANCE_ABSOLUTE_MAX 65535\n#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */\n# error \"LZ4_DISTANCE_MAX is too big : must be <= 65535\"\n#endif\n\n#define ML_BITS 4\n#define ML_MASK ((1U<=1)\n# include \n#else\n# ifndef assert\n# define assert(condition) ((void)0)\n# endif\n#endif\n\n#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */\n\n#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)\n# include \n static int g_debuglog_enable = 1;\n# define DEBUGLOG(l, ...) { \\\n if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \\\n fprintf(stderr, __FILE__ \" %i: \", __LINE__); \\\n fprintf(stderr, __VA_ARGS__); \\\n fprintf(stderr, \" \\n\"); \\\n } }\n#else\n# define DEBUGLOG(l, ...) 
{} /* disabled */\n#endif\n\nstatic int LZ4_isAligned(const void* ptr, size_t alignment)\n{\n return ((size_t)ptr & (alignment -1)) == 0;\n}\n\n\n/*-************************************\n* Types\n**************************************/\n#include \n#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include \n typedef uint8_t BYTE;\n typedef uint16_t U16;\n typedef uint32_t U32;\n typedef int32_t S32;\n typedef uint64_t U64;\n typedef uintptr_t uptrval;\n#else\n# if UINT_MAX != 4294967295UL\n# error \"LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4\"\n# endif\n typedef unsigned char BYTE;\n typedef unsigned short U16;\n typedef unsigned int U32;\n typedef signed int S32;\n typedef unsigned long long U64;\n typedef size_t uptrval; /* generally true, except OpenVMS-64 */\n#endif\n\n#if defined(__x86_64__)\n typedef U64 reg_t; /* 64-bits in x32 mode */\n#else\n typedef size_t reg_t; /* 32-bits in x32 mode */\n#endif\n\ntypedef enum {\n notLimited = 0,\n limitedOutput = 1,\n fillOutput = 2\n} limitedOutput_directive;\n\n\n/*-************************************\n* Reading and writing into memory\n**************************************/\n\n/**\n * LZ4 relies on memcpy with a constant size being inlined. In freestanding\n * environments, the compiler can't assume the implementation of memcpy() is\n * standard compliant, so it can't apply its specialized memcpy() inlining\n * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze\n * memcpy() as if it were standard compliant, so it can inline it in freestanding\n * environments. 
This is needed when decompressing the Linux Kernel, for example.\n */\n#if !defined(LZ4_memcpy)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)\n# else\n# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)\n# endif\n#endif\n\n#if !defined(LZ4_memmove)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memmove __builtin_memmove\n# else\n# define LZ4_memmove memmove\n# endif\n#endif\n\nstatic unsigned LZ4_isLittleEndian(void)\n{\n const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */\n return one.c[0];\n}\n\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))\n#elif defined(_MSC_VER)\n#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))\n#endif\n\n#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)\n/* lie to the compiler about data alignment; use with caution */\n\nstatic U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\n\n#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\nLZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;\nLZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;\nLZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;\n\nstatic U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }\nstatic U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; 
}\nstatic reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }\n\n#else /* safe and portable access using memcpy() */\n\nstatic U16 LZ4_read16(const void* memPtr)\n{\n U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 LZ4_read32(const void* memPtr)\n{\n U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic reg_t LZ4_read_ARCH(const void* memPtr)\n{\n reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic void LZ4_write16(void* memPtr, U16 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\nstatic void LZ4_write32(void* memPtr, U32 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* LZ4_FORCE_MEMORY_ACCESS */\n\n\nstatic U16 LZ4_readLE16(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read16(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U16)((U16)p[0] | (p[1]<<8));\n }\n}\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\nstatic U32 LZ4_readLE32(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read32(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);\n }\n}\n#endif\n\nstatic void LZ4_writeLE16(void* memPtr, U16 value)\n{\n if (LZ4_isLittleEndian()) {\n LZ4_write16(memPtr, value);\n } else {\n BYTE* p = (BYTE*)memPtr;\n p[0] = (BYTE) value;\n p[1] = (BYTE)(value>>8);\n }\n}\n\n/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */\nLZ4_FORCE_INLINE\nvoid LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/\nLZ4_FORCE_INLINE void\nLZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH\n * - there is at least 12 bytes available to write after dstEnd */\nLZ4_FORCE_INLINE void\nLZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)\n{\n BYTE v[8];\n\n assert(dstEnd >= dstPtr + MINMATCH);\n\n switch(offset) {\n case 1:\n MEM_INIT(v, *srcPtr, 8);\n break;\n case 2:\n LZ4_memcpy(v, srcPtr, 2);\n LZ4_memcpy(&v[2], srcPtr, 2);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(push)\n# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */\n#endif\n LZ4_memcpy(&v[4], v, 4);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(pop)\n#endif\n break;\n case 4:\n LZ4_memcpy(v, srcPtr, 4);\n LZ4_memcpy(&v[4], srcPtr, 4);\n break;\n default:\n LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);\n return;\n }\n\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n while (dstPtr < dstEnd) {\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n }\n}\n#endif\n\n\n/*-************************************\n* Common functions\n**************************************/\nstatic unsigned LZ4_NbCommonBytes (reg_t val)\n{\n assert(val != 0);\n if (LZ4_isLittleEndian()) {\n if (sizeof(val) == 8) {\n# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)\n/*-*************************************************************************************************\n* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.\n* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics\n* including _tzcnt_u64. 
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.\n****************************************************************************************************/\n# if defined(__clang__) && (__clang_major__ < 10)\n /* Avoid undefined clang-cl intrinsics issue.\n * See https://github.com/lz4/lz4/pull/1017 for details. */\n return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;\n# else\n /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */\n return (unsigned)_tzcnt_u64(val) >> 3;\n# endif\n# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r = 0;\n _BitScanForward64(&r, (U64)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctzll((U64)val) >> 3;\n# else\n const U64 m = 0x0101010101010101ULL;\n val ^= val - 1;\n return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);\n# endif\n } else /* 32 bits */ {\n# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r;\n _BitScanForward(&r, (U32)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctz((U32)val) >> 3;\n# else\n const U32 m = 0x01010101;\n return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;\n# endif\n }\n } else /* Big Endian CPU */ {\n if (sizeof(val)==8) {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clzll((U64)val) >> 3;\n# else\n#if 1\n /* this method is probably faster,\n * but adds a 128 bytes lookup table */\n static const unsigned char ctz7_tab[128] = {\n 7, 0, 1, 0, 2, 
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n };\n U64 const mask = 0x0101010101010101ULL;\n U64 const t = (((val >> 8) - mask) | val) & mask;\n return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];\n#else\n /* this method doesn't consume memory space like the previous one,\n * but it contains several branches,\n * that may end up slowing execution */\n static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.\n Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.\n Note that this code path is never triggered in 32-bits mode. */\n unsigned r;\n if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }\n if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n r += (!val);\n return r;\n#endif\n# endif\n } else /* 32 bits */ {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clz((U32)val) >> 3;\n# else\n val >>= 8;\n val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |\n (val + 0x00FF0000)) >> 24;\n return (unsigned)val ^ 3;\n# endif\n }\n }\n}\n\n\n#define STEPSIZE sizeof(reg_t)\nLZ4_FORCE_INLINE\nunsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)\n{\n const BYTE* const pStart = pIn;\n\n if (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) {\n pIn+=STEPSIZE; pMatch+=STEPSIZE;\n } else {\n return LZ4_NbCommonBytes(diff);\n } }\n\n while (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) { 
pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }\n pIn += LZ4_NbCommonBytes(diff);\n return (unsigned)(pIn - pStart);\n }\n\n if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }\n if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }\n if ((pIn compression run slower on incompressible data */\n\n\n/*-************************************\n* Local Structures and types\n**************************************/\ntypedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;\n\n/**\n * This enum distinguishes several different modes of accessing previous\n * content in the stream.\n *\n * - noDict : There is no preceding content.\n * - withPrefix64k : Table entries up to ctx->dictSize before the current blob\n * blob being compressed are valid and refer to the preceding\n * content (of length ctx->dictSize), which is available\n * contiguously preceding in memory the content currently\n * being compressed.\n * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere\n * else in memory, starting at ctx->dictionary with length\n * ctx->dictSize.\n * - usingDictCtx : Everything concerning the preceding content is\n * in a separate context, pointed to by ctx->dictCtx.\n * ctx->dictionary, ctx->dictSize, and table entries\n * in the current context that refer to positions\n * preceding the beginning of the current compression are\n * ignored. 
Instead, ctx->dictCtx->dictionary and ctx->dictCtx
 *                ->dictSize describe the location and size of the preceding
 *                content, and matches are found by looking in the ctx
 *                ->dictCtx->hashTable.
 */
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;


/*-************************************
*  Local Utils
**************************************/
/* Thin accessors over build-time constants/macros. */
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }


/*-****************************************
*  Internal Definitions, used only in Tests
*******************************************/
#if defined (__cplusplus)
extern "C" {
#endif

int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);

int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int maxOutputSize,
                                     const void* dictStart, size_t dictSize);
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int targetOutputSize, int dstCapacity,
                                     const void* dictStart, size_t dictSize);
#if defined (__cplusplus)
}
#endif

/*-******************************
*  Compression functions
********************************/

/* LZ4_hash4() :
 * multiplicative hash of a 4-byte sequence.
 * byU16 mode uses one extra hash bit (LZ4_HASHLOG+1). */
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

/* LZ4_hash5() :
 * multiplicative hash over 5 bytes of a 64-bit read :
 * on little-endian the low 5 bytes are selected (<< 24),
 * on big-endian the high 5 bytes are selected (>> 24). */
LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian()) {
        const U64 prime5bytes = 889523592379ULL;
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    } else {
        const U64 prime8bytes = 11400714785074694791ULL;
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
    }
}

/* LZ4_hashPosition() :
 * hashes the bytes at position p :
 * 5-byte hash on 64-bit targets (except byU16 mode), 4-byte hash otherwise. */
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);

#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
    return LZ4_hash4(LZ4_readLE32(p), tableType);
#else
    return LZ4_hash4(LZ4_read32(p), tableType);
#endif
}

/* LZ4_clearHash() :
 * resets hash-table cell h to its "empty" value for the given table type
 * (NULL for byPtr, 0 for byU32/byU16). */
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: { /* illegal! */ assert(0); return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
    }
}

/* LZ4_putIndexOnHash() :
 * stores index idx into cell h; only legal for byU32 / byU16 tables
 * (byU16 additionally requires idx < 65536). */
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */ assert(0); return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
    }
}

/* LZ4_putPosition*() : only used in byPtr mode */
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
                                            void* tableBase, tableType_t const tableType)
{
    const BYTE** const hashTable = (const BYTE**)tableBase;
    assert(tableType == byPtr); (void)tableType;
    hashTable[h] = p;
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType);
}

/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) {
        const U32* const hashTable = (const U32*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
        return hashTable[h];
    }
    if (tableType == byU16) {
        const U16* const hashTable = (const U16*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
        return hashTable[h];
    }
    assert(0); return 0;  /* forbidden case */
}

/* LZ4_getPositionOnHash() : byPtr-mode lookup of the pointer stored in cell h. */
static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    assert(tableType == byPtr); (void)tableType;
    { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
}

LZ4_FORCE_INLINE const BYTE*
LZ4_getPosition(const BYTE* p,
                const void* tableBase, tableType_t tableType)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType);
}

LZ4_FORCE_INLINE 
void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
                 const int inputSize,
                 const tableType_t tableType) {
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if ((tableType_t)cctx->tableType != clearedTable) {
        assert(inputSize >= 0);
        /* Reset when the previous table type differs, or when continuing could
         * overflow the index space of the requested mode (byU16 is limited to
         * 16-bit indexes, byU32 resets past 1 GB of accumulated offset). */
        if ((tableType_t)cctx->tableType != tableType
          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
          || tableType == byPtr
          || inputSize >= 4 KB)
        {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = (U32)clearedTable;
        } else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
     * is faster than compressing without a gap.
     * However, compressing with currentOffset == 0 is faster still,
     * so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}

/** LZ4_compress_generic_validated() :
 *  inlined, to ensure branches are decided at compilation time.
 *  The following conditions are presumed already validated:
 *  - source != NULL
 *  - inputSize > 0
 */
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 int* inputConsumed, /* only written when outputDirective == fillOutput */
                 const int maxOutputSize,
                 const limitedOutput_directive outputDirective,
                 const tableType_t tableType,
                 const 
dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n int result;\n const BYTE* ip = (const BYTE*)source;\n\n U32 const startIndex = cctx->currentOffset;\n const BYTE* base = (const BYTE*)source - startIndex;\n const BYTE* lowLimit;\n\n const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;\n const BYTE* const dictionary =\n dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;\n const U32 dictSize =\n dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;\n const U32 dictDelta =\n (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */\n\n int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);\n U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */\n const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;\n const BYTE* anchor = (const BYTE*) source;\n const BYTE* const iend = ip + inputSize;\n const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;\n const BYTE* const matchlimit = iend - LASTLITERALS;\n\n /* the dictCtx currentOffset is indexed on the start of the dictionary,\n * while a dictionary in the current context precedes the currentOffset */\n const BYTE* dictBase = (dictionary == NULL) ? NULL :\n (dictDirective == usingDictCtx) ?\n dictionary + dictSize - dictCtx->currentOffset :\n dictionary + dictSize - startIndex;\n\n BYTE* op = (BYTE*) dest;\n BYTE* const olimit = op + maxOutputSize;\n\n U32 offset = 0;\n U32 forwardH;\n\n DEBUGLOG(5, \"LZ4_compress_generic_validated: srcSize=%i, tableType=%u\", inputSize, tableType);\n assert(ip != NULL);\n if (tableType == byU16) assert(inputSize= 1);\n\n lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);\n\n /* Update context state */\n if (dictDirective == usingDictCtx) {\n /* Subsequent linked blocks can't use the dictionary. */\n /* Instead, they use the block we just compressed. */\n cctx->dictCtx = NULL;\n cctx->dictSize = (U32)inputSize;\n } else {\n cctx->dictSize += (U32)inputSize;\n }\n cctx->currentOffset += (U32)inputSize;\n cctx->tableType = (U32)tableType;\n\n if (inputSizehashTable, byPtr);\n } else {\n LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);\n } }\n ip++; forwardH = LZ4_hashPosition(ip, tableType);\n\n /* Main Loop */\n for ( ; ; ) {\n const BYTE* match;\n BYTE* token;\n const BYTE* filledIp;\n\n /* Find a match */\n if (tableType == byPtr) {\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);\n\n } while ( (match+LZ4_DISTANCE_MAX < ip)\n || (LZ4_read32(match) != LZ4_read32(ip)) );\n\n } else { /* byU32, byU16 */\n\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n U32 const current = (U32)(forwardIp - base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex <= current);\n assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n 
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n matchIndex += dictDelta; /* make dictCtx index comparable with current context */\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else if (dictDirective == usingExtDict) {\n if (matchIndex < startIndex) {\n DEBUGLOG(7, \"extDict candidate: matchIndex=%5u < startIndex=%5u\", matchIndex, startIndex);\n assert(startIndex - matchIndex >= MINMATCH);\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else { /* single continuous memory segment */\n match = base + matchIndex;\n }\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n\n DEBUGLOG(7, \"candidate at pos=%u (offset=%u \\n\", matchIndex, current - matchIndex);\n if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */\n assert(matchIndex < current);\n if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))\n && (matchIndex+LZ4_DISTANCE_MAX < current)) {\n continue;\n } /* too far */\n assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */\n\n if (LZ4_read32(match) == LZ4_read32(ip)) {\n if (maybe_extMem) offset = current - matchIndex;\n break; /* match found */\n }\n\n } while(1);\n }\n\n /* Catch up */\n filledIp = ip;\n assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */\n if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {\n do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));\n }\n\n /* Encode Literals */\n { unsigned const litLength = (unsigned)(ip - anchor);\n token = op++;\n if ((outputDirective == limitedOutput) && /* Check output buffer overflow */\n (unlikely(op + 
litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n if ((outputDirective == fillOutput) &&\n (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {\n op--;\n goto _last_literals;\n }\n if (litLength >= RUN_MASK) {\n unsigned len = litLength - RUN_MASK;\n *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255;\n *op++ = (BYTE)len;\n }\n else *token = (BYTE)(litLength< olimit)) {\n /* the match was too close to the end, rewind and go to last literals */\n op = token;\n goto _last_literals;\n }\n\n /* Encode Offset */\n if (maybe_extMem) { /* static test */\n DEBUGLOG(6, \" with offset=%u (ext if > %i)\", offset, (int)(ip - (const BYTE*)source));\n assert(offset <= LZ4_DISTANCE_MAX && offset > 0);\n LZ4_writeLE16(op, (U16)offset); op+=2;\n } else {\n DEBUGLOG(6, \" with offset=%u (same segment)\", (U32)(ip - match));\n assert(ip-match <= LZ4_DISTANCE_MAX);\n LZ4_writeLE16(op, (U16)(ip - match)); op+=2;\n }\n\n /* Encode MatchLength */\n { unsigned matchCode;\n\n if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)\n && (lowLimit==dictionary) /* match within extDict */ ) {\n const BYTE* limit = ip + (dictEnd-match);\n assert(dictEnd > match);\n if (limit > matchlimit) limit = matchlimit;\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);\n ip += (size_t)matchCode + MINMATCH;\n if (ip==limit) {\n unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);\n matchCode += more;\n ip += more;\n }\n DEBUGLOG(6, \" with matchLength=%u starting in extDict\", matchCode+MINMATCH);\n } else {\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);\n ip += (size_t)matchCode + MINMATCH;\n DEBUGLOG(6, \" with matchLength=%u\", matchCode+MINMATCH);\n }\n\n if 
((outputDirective) && /* Check output buffer overflow */\n (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {\n if (outputDirective == fillOutput) {\n /* Match description too long : reduce it */\n U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;\n ip -= matchCode - newMatchCode;\n assert(newMatchCode < matchCode);\n matchCode = newMatchCode;\n if (unlikely(ip <= filledIp)) {\n /* We have already filled up to filledIp so if ip ends up less than filledIp\n * we have positions in the hash table beyond the current position. This is\n * a problem if we reuse the hash table. So we have to remove these positions\n * from the hash table.\n */\n const BYTE* ptr;\n DEBUGLOG(5, \"Clearing %u positions\", (U32)(filledIp - ip));\n for (ptr = ip; ptr <= filledIp; ++ptr) {\n U32 const h = LZ4_hashPosition(ptr, tableType);\n LZ4_clearHash(h, cctx->hashTable, tableType);\n }\n }\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n }\n if (matchCode >= ML_MASK) {\n *token += ML_MASK;\n matchCode -= ML_MASK;\n LZ4_write32(op, 0xFFFFFFFF);\n while (matchCode >= 4*255) {\n op+=4;\n LZ4_write32(op, 0xFFFFFFFF);\n matchCode -= 4*255;\n }\n op += matchCode / 255;\n *op++ = (BYTE)(matchCode % 255);\n } else\n *token += (BYTE)(matchCode);\n }\n /* Ensure we have enough space for the last literals. 
*/\n assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));\n\n anchor = ip;\n\n /* Test end of chunk */\n if (ip >= mflimitPlusOne) break;\n\n /* Fill table */\n { U32 const h = LZ4_hashPosition(ip-2, tableType);\n if (tableType == byPtr) {\n LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);\n } else {\n U32 const idx = (U32)((ip-2) - base);\n LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);\n } }\n\n /* Test next position */\n if (tableType == byPtr) {\n\n match = LZ4_getPosition(ip, cctx->hashTable, tableType);\n LZ4_putPosition(ip, cctx->hashTable, tableType);\n if ( (match+LZ4_DISTANCE_MAX >= ip)\n && (LZ4_read32(match) == LZ4_read32(ip)) )\n { token=op++; *token=0; goto _next_match; }\n\n } else { /* byU32, byU16 */\n\n U32 const h = LZ4_hashPosition(ip, tableType);\n U32 const current = (U32)(ip-base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n matchIndex += dictDelta;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else if (dictDirective==usingExtDict) {\n if (matchIndex < startIndex) {\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else { /* single memory segment */\n match = base + matchIndex;\n }\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if ( ((dictIssue==dictSmall) ? 
(matchIndex >= prefixIdxLimit) : 1)\n && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))\n && (LZ4_read32(match) == LZ4_read32(ip)) ) {\n token=op++;\n *token=0;\n if (maybe_extMem) offset = current - matchIndex;\n DEBUGLOG(6, \"seq.start:%i, literals=%u, match.start:%i\",\n (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));\n goto _next_match;\n }\n }\n\n /* Prepare next loop */\n forwardH = LZ4_hashPosition(++ip, tableType);\n\n }\n\n_last_literals:\n /* Encode Last Literals */\n { size_t lastRun = (size_t)(iend - anchor);\n if ( (outputDirective) && /* Check output buffer overflow */\n (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {\n if (outputDirective == fillOutput) {\n /* adapt lastRun to fill 'dst' */\n assert(olimit >= op);\n lastRun = (size_t)(olimit-op) - 1/*token*/;\n lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */\n }\n }\n DEBUGLOG(6, \"Final literal run : %i literals\", (int)lastRun);\n if (lastRun >= RUN_MASK) {\n size_t accumulator = lastRun - RUN_MASK;\n *op++ = RUN_MASK << ML_BITS;\n for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;\n *op++ = (BYTE) accumulator;\n } else {\n *op++ = (BYTE)(lastRun< 0);\n DEBUGLOG(5, \"LZ4_compress_generic: compressed %i bytes into %i bytes\", inputSize, result);\n return result;\n}\n\n/** LZ4_compress_generic() :\n * inlined, to ensure branches are decided at compilation time;\n * takes care of src == (NULL, 0)\n * and forward the rest to LZ4_compress_generic_validated */\nLZ4_FORCE_INLINE int LZ4_compress_generic(\n LZ4_stream_t_internal* const cctx,\n const char* const src,\n char* const dst,\n const int srcSize,\n int *inputConsumed, /* only written when outputDirective == fillOutput */\n const int dstCapacity,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n DEBUGLOG(5, \"LZ4_compress_generic: srcSize=%i, dstCapacity=%i\",\n srcSize, dstCapacity);\n\n if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */\n if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */\n if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */\n DEBUGLOG(5, \"Generating an empty block\");\n assert(outputDirective == notLimited || dstCapacity >= 1);\n assert(dst != NULL);\n dst[0] = 0;\n if (outputDirective == fillOutput) {\n assert (inputConsumed != NULL);\n *inputConsumed = 0;\n }\n return 1;\n }\n assert(src != NULL);\n\n return LZ4_compress_generic_validated(cctx, src, dst, srcSize,\n inputConsumed, /* only written into if outputDirective == fillOutput */\n dstCapacity, outputDirective,\n tableType, dictDirective, dictIssue, 
acceleration);
}


int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    /* Fully (re-)initializes the caller-provided state, clamps the
     * acceleration factor into [1, LZ4_ACCELERATION_MAX], then dispatches to
     * the table mode suited to the input size and pointer width. */
    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
    assert(ctx != NULL);
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        /* Destination covers the worst case : use the unbounded-output path. */
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            /* on 32-bit targets with high addresses, byU32 indexes cannot
             * represent the pointer range : fall back to byPtr */
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}

/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    assert(ctx != NULL);

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            /* currentOffset != 0 : table may carry entries from earlier use;
             * dictSmall guards matches that fall outside the valid area */
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}


int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL) return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t* const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}


int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
{
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}


/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
    void* const s = LZ4_initStream(state, sizeof (*state));
    assert(s != NULL); (void)s;

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
    } else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
    }   }
}

int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
    int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
    /* clean the state on exit */
    LZ4_initStream(state, sizeof (LZ4_stream_t));
    return r;
}


int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
    LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctx == NULL) return 0;
#else
    LZ4_stream_t ctxBody;
    LZ4_stream_t* const ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);

#if (LZ4_HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}



/*-******************************
*  Streaming functions
********************************/

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
/* LZ4_createStream() :
 * heap-allocates and initializes a fresh streaming state; NULL on alloc failure. */
LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_initStream(lz4s, sizeof(*lz4s));
    return lz4s;
}
#endif

/* LZ4_stream_t_alignment() :
 * required alignment for an LZ4_stream_t, measured via a char+struct layout
 * when LZ4_ALIGN_TEST is enabled; otherwise 1 (check disabled). */
static size_t LZ4_stream_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_stream_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_stream_t);
#else
    return 1;  /* effectively disabled */
#endif
}

/* LZ4_initStream() :
 * validates the caller buffer (non-NULL, large enough, suitably aligned),
 * zeroes the internal state, and returns the buffer as an LZ4_stream_t;
 * NULL on any validation failure. */
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
    DEBUGLOG(5, "LZ4_initStream");
    if (buffer == NULL) { return NULL; }
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
    return (LZ4_stream_t*)buffer;
}

/* resetStream is now deprecated,
 * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}

/* LZ4_resetStream_fast() :
 * cheap reset : reuses/clears the table via LZ4_prepareTable instead of
 * zeroing the whole state. */
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}
#endif


typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
#define HASH_UNIT sizeof(reg_t)
/* LZ4_loadDict_internal() :
 * resets the stream, keeps (at most) the last 64 KB of `dictionary`, and
 * seeds the byU32 hash table from it. _ld_slow adds a second, denser pass.
 * Returns the number of dictionary bytes retained. */
int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
                    const char* dictionary, int dictSize,
                    LoadDict_mode_e _ld)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    U32 idx32;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    /* It's necessary to reset the context,
     * and not just continue it with prepareTable()
     * to avoid any risk of generating overflowing matchIndex
     * when compressing using this dictionary */
    LZ4_resetStream(LZ4_dict);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k.
     */
    dict->currentOffset += 64 KB;

    if (dictSize < (int)HASH_UNIT) {
        return 0;   /* too small to index even one position */
    }

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->tableType = (U32)tableType;
    idx32 = dict->currentOffset - dict->dictSize;

    /* fast pass : index every 3rd position */
    while (p <= dictEnd-HASH_UNIT) {
        U32 const h = LZ4_hashPosition(p, tableType);
        /* Note: overwriting => favors positions end of dictionary */
        LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
        p+=3; idx32+=3;
    }

    if (_ld == _ld_slow) {
        /* Fill hash table with additional references, to improve compression capability */
        p = dict->dictionary;
        idx32 = dict->currentOffset - dict->dictSize;
        while (p <= dictEnd-HASH_UNIT) {
            U32 const h = LZ4_hashPosition(p, tableType);
            U32 const limit = dict->currentOffset - 64 KB;
            if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
                /* Note: not overwriting => favors positions beginning of dictionary */
                LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
            }
            p++; idx32++;
        }
    }

    return (int)dict->dictSize;
}

int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
}

int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
}

void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
{
    const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
        &(dictionaryStream->internal_donotuse);

    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
             workingStream, dictionaryStream,
             dictCtx != NULL ? dictCtx->dictSize : 0);

    if (dictCtx != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicate a miss. 
In that case, we need\n * to bump the offset to something non-zero.\n */\n if (workingStream->internal_donotuse.currentOffset == 0) {\n workingStream->internal_donotuse.currentOffset = 64 KB;\n }\n\n /* Don't actually attach an empty dictionary.\n */\n if (dictCtx->dictSize == 0) {\n dictCtx = NULL;\n }\n }\n workingStream->internal_donotuse.dictCtx = dictCtx;\n}\n\n\nstatic void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)\n{\n assert(nextSize >= 0);\n if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */\n /* rescale hash table */\n U32 const delta = LZ4_dict->currentOffset - 64 KB;\n const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;\n int i;\n DEBUGLOG(4, \"LZ4_renormDictT\");\n for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0;\n else LZ4_dict->hashTable[i] -= delta;\n }\n LZ4_dict->currentOffset = 64 KB;\n if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;\n LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;\n }\n}\n\n\nint LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,\n const char* source, char* dest,\n int inputSize, int maxOutputSize,\n int acceleration)\n{\n const tableType_t tableType = byU32;\n LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;\n const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;\n\n DEBUGLOG(5, \"LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)\", inputSize, streamPtr->dictSize);\n\n LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n\n /* invalidate tiny dictionaries */\n if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */\n && (dictEnd != source) /* prefix mode */\n && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */\n && (streamPtr->dictCtx == NULL) /* usingDictCtx */\n ) {\n DEBUGLOG(5, \"LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small\", streamPtr->dictSize, streamPtr->dictionary);\n /* remove dictionary existence from history, to employ faster prefix mode */\n streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)source;\n dictEnd = source;\n }\n\n /* Check overlapping input/dictionary space */\n { const char* const sourceEnd = source + inputSize;\n if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {\n streamPtr->dictSize = (U32)(dictEnd - sourceEnd);\n if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;\n if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;\n }\n }\n\n /* prefix mode : source data follows dictionary */\n if (dictEnd == source) {\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);\n else\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);\n }\n\n /* external dictionary mode */\n { int 
result;\n if (streamPtr->dictCtx) {\n /* We depend here on the fact that dictCtx'es (produced by\n * LZ4_loadDict) guarantee that their tables contain no references\n * to offsets between dictCtx->currentOffset - 64 KB and\n * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe\n * to use noDictIssue even when the dict isn't a full 64 KB.\n */\n if (inputSize > 4 KB) {\n /* For compressing large blobs, it is faster to pay the setup\n * cost to copy the dictionary's tables into the active context,\n * so that the compression loop is only looking into one table.\n */\n LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);\n }\n } else { /* small data <= 4 KB */\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n }\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)inputSize;\n return result;\n }\n}\n\n\n/* Hidden debug function, to force-test external dictionary mode */\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n\n LZ4_renormDictT(streamPtr, srcSize);\n\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, 
dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n\n return result;\n}\n\n\n/*! LZ4_saveDict() :\n * If previously compressed data block is not guaranteed to remain available at its memory location,\n * save it into a safer place (char* safeBuffer).\n * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,\n * one can therefore call LZ4_compress_fast_continue() right after.\n * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.\n */\nint LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n\n DEBUGLOG(5, \"LZ4_saveDict : dictSize=%i, safeBuffer=%p\", dictSize, safeBuffer);\n\n if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */\n if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }\n\n if (safeBuffer == NULL) assert(dictSize == 0);\n if (dictSize > 0) {\n const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;\n assert(dict->dictionary);\n LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);\n }\n\n dict->dictionary = (const BYTE*)safeBuffer;\n dict->dictSize = (U32)dictSize;\n\n return dictSize;\n}\n\n\n\n/*-*******************************\n * Decompression functions\n ********************************/\n\ntypedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;\n\n#undef MIN\n#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )\n\n\n/* variant for decompress_unsafe()\n * does not know end of input\n * presumes input is well formed\n * note : will consume at least one byte */\nstatic size_t read_long_length_no_check(const BYTE** pp)\n{\n size_t b, l = 0;\n do { b = **pp; (*pp)++; l += b; } while (b==255);\n DEBUGLOG(6, \"read_long_length_no_check: +length=%zu using %zu input bytes\", l, l/255 + 1)\n return l;\n}\n\n/* core decoder variant for LZ4_decompress_fast*()\n * for legacy support only : these entry points are deprecated.\n * - Presumes input is correctly formed (no defense vs malformed inputs)\n * - Does not know input size (presume input buffer is \"large enough\")\n * - Decompress a full block (only)\n * @return : nb of bytes read from input.\n * Note : this variant is not optimized for speed, just for maintenance.\n * the goal is to remove support of decompress_fast*() variants by v2.0\n**/\nLZ4_FORCE_INLINE int\nLZ4_decompress_unsafe_generic(\n const BYTE* const istart,\n BYTE* const ostart,\n int decompressedSize,\n\n size_t prefixSize,\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note: =0 if dictStart==NULL */\n )\n{\n const BYTE* ip = istart;\n BYTE* op = (BYTE*)ostart;\n BYTE* const oend = ostart + decompressedSize;\n const BYTE* const prefixStart = ostart - prefixSize;\n\n DEBUGLOG(5, \"LZ4_decompress_unsafe_generic\");\n if (dictStart == NULL) assert(dictSize == 0);\n\n while (1) {\n /* start new sequence */\n unsigned token = *ip++;\n\n /* literals */\n { size_t ll = token >> ML_BITS;\n if (ll==15) {\n /* long literal length */\n ll += read_long_length_no_check(&ip);\n }\n if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */\n LZ4_memmove(op, ip, ll); /* support in-place decompression */\n op += ll;\n ip += ll;\n if ((size_t)(oend-op) < MFLIMIT) {\n if (op==oend) break; /* end of block */\n DEBUGLOG(5, \"invalid: literals end at distance %zi from end of block\", oend-op);\n /* incorrect end of block 
:\n * last match must start at least MFLIMIT==12 bytes before end of output block */\n return -1;\n } }\n\n /* match */\n { size_t ml = token & 15;\n size_t const offset = LZ4_readLE16(ip);\n ip+=2;\n\n if (ml==15) {\n /* long literal length */\n ml += read_long_length_no_check(&ip);\n }\n ml += MINMATCH;\n\n if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */\n\n { const BYTE* match = op - offset;\n\n /* out of range */\n if (offset > (size_t)(op - prefixStart) + dictSize) {\n DEBUGLOG(6, \"offset out of range\");\n return -1;\n }\n\n /* check special case : extDict */\n if (offset > (size_t)(op - prefixStart)) {\n /* extDict scenario */\n const BYTE* const dictEnd = dictStart + dictSize;\n const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));\n size_t const extml = (size_t)(dictEnd - extMatch);\n if (extml > ml) {\n /* match entirely within extDict */\n LZ4_memmove(op, extMatch, ml);\n op += ml;\n ml = 0;\n } else {\n /* match split between extDict & prefix */\n LZ4_memmove(op, extMatch, extml);\n op += extml;\n ml -= extml;\n }\n match = prefixStart;\n }\n\n /* match copy - slow variant, supporting overlap copy */\n { size_t u;\n for (u=0; u= ipmax before start of loop. Returns initial_error if so.\n * @error (output) - error code. 
Must be set to 0 before call.\n**/\ntypedef size_t Rvl_t;\nstatic const Rvl_t rvl_error = (Rvl_t)(-1);\nLZ4_FORCE_INLINE Rvl_t\nread_variable_length(const BYTE** ip, const BYTE* ilimit,\n int initial_check)\n{\n Rvl_t s, length = 0;\n assert(ip != NULL);\n assert(*ip != NULL);\n assert(ilimit != NULL);\n if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */\n return rvl_error;\n }\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n if (likely(s != 255)) return length;\n do {\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n } while (s == 255);\n\n return length;\n}\n\n/*! 
LZ4_decompress_generic() :\n * This generic decompression function covers all use cases.\n * It shall be instantiated several times, using different sets of directives.\n * Note that it is important for performance that this function really get inlined,\n * in order to remove useless branches during compilation optimization.\n */\nLZ4_FORCE_INLINE int\nLZ4_decompress_generic(\n const char* const src,\n char* const dst,\n int srcSize,\n int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */\n\n earlyEnd_directive partialDecoding, /* full, partial */\n dict_directive dict, /* noDict, withPrefix64k, usingExtDict */\n const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note : = 0 if noDict */\n )\n{\n if ((src == NULL) || (outputSize < 0)) { return -1; }\n\n { const BYTE* ip = (const BYTE*) src;\n const BYTE* const iend = ip + srcSize;\n\n BYTE* op = (BYTE*) dst;\n BYTE* const oend = op + outputSize;\n BYTE* cpy;\n\n const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;\n\n const int checkOffset = (dictSize < (int)(64 KB));\n\n\n /* Set up the \"end\" pointers for the shortcut. */\n const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;\n const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;\n\n const BYTE* match;\n size_t offset;\n unsigned token;\n size_t length;\n\n\n DEBUGLOG(5, \"LZ4_decompress_generic (srcSize:%i, dstSize:%i)\", srcSize, outputSize);\n\n /* Special cases */\n assert(lowPrefix <= op);\n if (unlikely(outputSize==0)) {\n /* Empty output buffer */\n if (partialDecoding) return 0;\n return ((srcSize==1) && (*ip==0)) ? 
0 : -1;\n }\n if (unlikely(srcSize==0)) { return -1; }\n\n /* LZ4_FAST_DEC_LOOP:\n * designed for modern OoO performance cpus,\n * where copying reliably 32-bytes is preferable to an unpredictable branch.\n * note : fast loop may show a regression for some client arm chips. */\n#if LZ4_FAST_DEC_LOOP\n if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(6, \"move to safe decode loop\");\n goto safe_decode;\n }\n\n /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using fast decode loop\");\n while (1) {\n /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */\n assert(oend - op >= FASTLOOP_SAFE_DISTANCE);\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) {\n DEBUGLOG(6, \"error reading long literal length\");\n goto _output_error;\n }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n\n /* copy literals */\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }\n LZ4_wildCopy32(op, ip, op+length);\n ip += length; op += length;\n } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {\n /* We don't need to check oend, since we check it once for each loop below */\n DEBUGLOG(7, \"copy %u bytes in a 16-bytes stripe\", (unsigned)length);\n /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */\n LZ4_memcpy(op, ip, 16);\n ip += length; op += length;\n } else {\n goto safe_literal_copy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n DEBUGLOG(6, \"blockPos%6u: offset = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)offset);\n match = op - offset;\n assert(match <= op); /* overflow check */\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \" match length token = %u (len==%u)\", (unsigned)length, (unsigned)length+MINMATCH);\n\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) {\n DEBUGLOG(5, \"error reading long match length\");\n goto _output_error;\n }\n length += addl;\n length += MINMATCH;\n DEBUGLOG(7, \" long match length == %u\", (unsigned)length);\n if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n goto safe_match_copy;\n }\n } else {\n length += MINMATCH;\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(7, \"moving to safe_match_copy (ml==%u)\", (unsigned)length);\n goto safe_match_copy;\n }\n\n /* Fastpath check: skip LZ4_wildCopy32 when true */\n if ((dict == withPrefix64k) || (match >= lowPrefix)) {\n if (offset >= 8) {\n assert(match >= lowPrefix);\n assert(match <= op);\n assert(op + 18 <= oend);\n\n LZ4_memcpy(op, match, 8);\n LZ4_memcpy(op+8, match+8, 8);\n LZ4_memcpy(op+16, match+16, 2);\n op += length;\n continue;\n } } }\n\n if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {\n DEBUGLOG(5, \"Error : pos=%zi, offset=%zi => outside buffers\", op-lowPrefix, op-match);\n goto _output_error;\n }\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) {\n DEBUGLOG(7, \"partialDecoding: dictionary match, close to dstEnd\");\n length = MIN(length, (size_t)(oend-op));\n } else {\n DEBUGLOG(6, \"end-of-block condition violated\")\n goto _output_error;\n } }\n\n if (length <= 
(size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) { *op++ = *copyFrom++; }\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n\n /* copy match within block */\n cpy = op + length;\n\n assert((op <= oend) && (oend-op >= 32));\n if (unlikely(offset<16)) {\n LZ4_memcpy_using_offset(op, match, cpy, offset);\n } else {\n LZ4_wildCopy32(op, match, cpy);\n }\n\n op = cpy; /* wildcopy correction */\n }\n safe_decode:\n#endif\n\n /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using safe decode loop\");\n while (1) {\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* A two-stage shortcut for the most common case:\n * 1) If the literal length is 0..14, and there is enough space,\n * enter the shortcut and copy 16 bytes on behalf of the literals\n * (in the fast mode, only 8 bytes can be safely copied this way).\n * 2) Further if the match length is 4..18, copy 18 bytes in a similar\n * manner; but we ensure that there's enough space in the output for\n * those 18 bytes earlier, upon entering the shortcut (in other words,\n * there is a combined check for both stages).\n */\n if ( (length != RUN_MASK)\n /* strictly \"less than\" on input, to re-enter the loop with at least one byte */\n && likely((ip < shortiend) & (op <= 
shortoend)) ) {\n /* Copy the literals */\n LZ4_memcpy(op, ip, 16);\n op += length; ip += length;\n\n /* The second stage: prepare for match copying, decode full info.\n * If it doesn't work out, the info won't be wasted. */\n length = token & ML_MASK; /* match length */\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u (len=%u)\", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);\n offset = LZ4_readLE16(ip); ip += 2;\n match = op - offset;\n assert(match <= op); /* check overflow */\n\n /* Do not deal with overlapping matches. */\n if ( (length != ML_MASK)\n && (offset >= 8)\n && (dict==withPrefix64k || match >= lowPrefix) ) {\n /* Copy the match. */\n LZ4_memcpy(op + 0, match + 0, 8);\n LZ4_memcpy(op + 8, match + 8, 8);\n LZ4_memcpy(op +16, match +16, 2);\n op += length + MINMATCH;\n /* Both stages worked, load the next token. */\n continue;\n }\n\n /* The second stage didn't work out, but the info is ready.\n * Propel it right to the point of match copying. */\n goto _copy_match;\n }\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n }\n\n#if LZ4_FAST_DEC_LOOP\n safe_literal_copy:\n#endif\n /* copy literals */\n cpy = op+length;\n\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {\n /* We've either hit the input parsing restriction or the output parsing restriction.\n * In the normal scenario, decoding a full block, it must be the last sequence,\n * otherwise it's an error (invalid input or dimensions).\n * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.\n */\n if (partialDecoding) {\n /* Since we are 
partial decoding we may be in this block because of the output parsing\n * restriction, which is not valid since the output buffer is allowed to be undersized.\n */\n DEBUGLOG(7, \"partialDecoding: copying literals, close to input or output end\")\n DEBUGLOG(7, \"partialDecoding: literal length = %u\", (unsigned)length);\n DEBUGLOG(7, \"partialDecoding: remaining space in dstBuffer : %i\", (int)(oend - op));\n DEBUGLOG(7, \"partialDecoding: remaining space in srcBuffer : %i\", (int)(iend - ip));\n /* Finishing in the middle of a literals segment,\n * due to lack of input.\n */\n if (ip+length > iend) {\n length = (size_t)(iend-ip);\n cpy = op + length;\n }\n /* Finishing in the middle of a literals segment,\n * due to lack of output space.\n */\n if (cpy > oend) {\n cpy = oend;\n assert(op<=oend);\n length = (size_t)(oend-op);\n }\n } else {\n /* We must be on the last sequence (or invalid) because of the parsing limitations\n * so check that we exactly consume the input and don't overrun the output buffer.\n */\n if ((ip+length != iend) || (cpy > oend)) {\n DEBUGLOG(5, \"should have been last run of literals\")\n DEBUGLOG(5, \"ip(%p) + length(%i) = %p != iend (%p)\", ip, (int)length, ip+length, iend);\n DEBUGLOG(5, \"or cpy(%p) > (oend-MFLIMIT)(%p)\", cpy, oend-MFLIMIT);\n DEBUGLOG(5, \"after writing %u bytes / %i bytes available\", (unsigned)(op-(BYTE*)dst), outputSize);\n goto _output_error;\n }\n }\n LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */\n ip += length;\n op += length;\n /* Necessarily EOF when !partialDecoding.\n * When partialDecoding, it is EOF if we've either\n * filled the output buffer or\n * can't proceed with reading an offset for following match.\n */\n if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {\n break;\n }\n } else {\n LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */\n ip += length; op = cpy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n match = op - offset;\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n _copy_match:\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */\n }\n length += MINMATCH;\n\n#if LZ4_FAST_DEC_LOOP\n safe_match_copy:\n#endif\n if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) length = MIN(length, (size_t)(oend-op));\n else goto _output_error; /* doesn't respect parsing restriction */\n }\n\n if (length <= (size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) *op++ = *copyFrom++;\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n assert(match >= lowPrefix);\n\n /* copy match within block */\n cpy = op + length;\n\n /* partialDecoding : may end anywhere within the block */\n assert(op<=oend);\n if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n size_t const mlen = MIN(length, 
(size_t)(oend-op));\n const BYTE* const matchEnd = match + mlen;\n BYTE* const copyEnd = op + mlen;\n if (matchEnd > op) { /* overlap copy */\n while (op < copyEnd) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, mlen);\n }\n op = copyEnd;\n if (op == oend) { break; }\n continue;\n }\n\n if (unlikely(offset<8)) {\n LZ4_write32(op, 0); /* silence msan warning when offset==0 */\n op[0] = match[0];\n op[1] = match[1];\n op[2] = match[2];\n op[3] = match[3];\n match += inc32table[offset];\n LZ4_memcpy(op+4, match, 4);\n match -= dec64table[offset];\n } else {\n LZ4_memcpy(op, match, 8);\n match += 8;\n }\n op += 8;\n\n if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);\n if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */\n if (op < oCopyLimit) {\n LZ4_wildCopy8(op, match, oCopyLimit);\n match += oCopyLimit - op;\n op = oCopyLimit;\n }\n while (op < cpy) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, 8);\n if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }\n }\n op = cpy; /* wildcopy correction */\n }\n\n /* end of decoding */\n DEBUGLOG(5, \"decoded %i bytes\", (int) (((char*)op)-dst));\n return (int) (((char*)op)-dst); /* Nb of output bytes decoded */\n\n /* Overflow error detected */\n _output_error:\n return (int) (-(((const char*)ip)-src))-1;\n }\n}\n\n\n/*===== Instantiate the API decoding functions. 
 =====*/

/* LZ4_decompress_safe() :
 * Standard one-shot block decoder: validates input, never writes past
 * dest + maxDecompressedSize. No dictionary, no prefix. */
LZ4_FORCE_O2
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
                                  decode_full_block, noDict,
                                  (BYTE*)dest, NULL, 0);
}

/* LZ4_decompress_safe_partial() :
 * Same as LZ4_decompress_safe() but may stop early: decodes at most
 * min(targetOutputSize, dstCapacity) bytes (partial_decode directive). */
LZ4_FORCE_O2
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
{
    dstCapacity = MIN(targetOutputSize, dstCapacity);
    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
                                  partial_decode,
                                  noDict, (BYTE*)dst, NULL, 0);
}

/* LZ4_decompress_fast() :
 * Deprecated legacy entry point: trusts the input blindly (no malformed-input
 * defense) and routes through the slow, maintenance-oriented unsafe decoder. */
LZ4_FORCE_O2
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    DEBUGLOG(5, "LZ4_decompress_fast");
    return LZ4_decompress_unsafe_generic(
                (const BYTE*)source, (BYTE*)dest, originalSize,
                0, NULL, 0);
}

/*===== Instantiate a few more decoding cases, used more than once. =====*/

LZ4_FORCE_O2 /* Exported, an obsolete API function. */
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    /* prefix mode: the 64 KB immediately before dest serve as history */
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                  decode_full_block, withPrefix64k,
                                  (BYTE*)dest - 64 KB, NULL, 0);
}

/* Partial-decode variant of the withPrefix64k decoder above. */
LZ4_FORCE_O2
static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
{
    dstCapacity = MIN(targetOutputSize, dstCapacity);
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
                                  partial_decode, withPrefix64k,
                                  (BYTE*)dest - 64 KB, NULL, 0);
}

/* Another obsolete API function, paired with the previous one.
*/\nint LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,\n size_t prefixSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_safe_forceExtDict\");\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,\n const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, (const BYTE*)dictStart, dictSize);\n}\n\n/* The \"double 
dictionary\" mode, for use with e.g. ring buffers: the first part\n * of the dictionary is passed as prefix, and the second via dictStart + dictSize.\n * These routines are used only once, in LZ4_decompress_*_continue().\n */\nLZ4_FORCE_INLINE\nint LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize, const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);\n}\n\n/*===== streaming decompression functions =====*/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_streamDecode_t* LZ4_createStreamDecode(void)\n{\n LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));\n return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));\n}\n\nint LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)\n{\n if (LZ4_stream == NULL) { return 0; } /* support free on NULL */\n FREEMEM(LZ4_stream);\n return 0;\n}\n#endif\n\n/*! LZ4_setStreamDecode() :\n * Use this function to instruct where to find the dictionary.\n * This function is not necessary if previous data is still available where it was decoded.\n * Loading a size of 0 is allowed (same effect as no dictionary).\n * @return : 1 if OK, 0 if error\n */\nint LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n lz4sd->prefixSize = (size_t)dictSize;\n if (dictSize) {\n assert(dictionary != NULL);\n lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;\n } else {\n lz4sd->prefixEnd = (const BYTE*) dictionary;\n }\n lz4sd->externalDict = NULL;\n lz4sd->extDictSize = 0;\n return 1;\n}\n\n/*! 
LZ4_decoderRingBufferSize() :\n * when setting a ring buffer for streaming decompression (optional scenario),\n * provides the minimum size of this ring buffer\n * to be compatible with any source respecting maxBlockSize condition.\n * Note : in a ring buffer scenario,\n * blocks are presumed decompressed next to each other.\n * When not enough space remains for next block (remainingSize < maxBlockSize),\n * decoding resumes from beginning of ring buffer.\n * @return : minimum ring buffer size,\n * or 0 if there is an error (invalid maxBlockSize).\n */\nint LZ4_decoderRingBufferSize(int maxBlockSize)\n{\n if (maxBlockSize < 0) return 0;\n if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;\n if (maxBlockSize < 16) maxBlockSize = 16;\n return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);\n}\n\n/*\n*_continue() :\n These decoding functions allow decompression of multiple blocks in \"streaming\" mode.\n Previously decoded blocks must still be available at the memory position where they were decoded.\n If it's not possible, save the relevant part of decoded data into a safe buffer,\n and indicate where it stands using LZ4_setStreamDecode()\n*/\nLZ4_FORCE_O2\nint LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n int result;\n\n if (lz4sd->prefixSize == 0) {\n /* The first call, no dictionary yet. */\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n /* They're rolling the current segment. 
*/\n if (lz4sd->prefixSize >= 64 KB - 1)\n result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n else if (lz4sd->extDictSize == 0)\n result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize);\n else\n result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize += (size_t)result;\n lz4sd->prefixEnd += result;\n } else {\n /* The buffer wraps around, or they're switching to another buffer. */\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n }\n\n return result;\n}\n\nLZ4_FORCE_O2 int\nLZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,\n const char* source, char* dest, int originalSize)\n{\n LZ4_streamDecode_t_internal* const lz4sd =\n (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);\n int result;\n\n DEBUGLOG(5, \"LZ4_decompress_fast_continue (toDecodeSize=%i)\", originalSize);\n assert(originalSize >= 0);\n\n if (lz4sd->prefixSize == 0) {\n DEBUGLOG(5, \"first invocation : no prefix nor extDict\");\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_fast(source, dest, originalSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n DEBUGLOG(5, \"continue using existing prefix\");\n result = LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n lz4sd->prefixSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return 
result;\n lz4sd->prefixSize += (size_t)originalSize;\n lz4sd->prefixEnd += originalSize;\n } else {\n DEBUGLOG(5, \"prefix becomes extDict\");\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_fast_extDict(source, dest, originalSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n }\n\n return result;\n}\n\n\n/*\nAdvanced decoding functions :\n*_usingDict() :\n These decoding functions work the same as \"_continue\" ones,\n the dictionary must be explicitly provided within parameters\n*/\n\nint LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, 
dstCapacity, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0 || dictStart+dictSize == dest)\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n (size_t)dictSize, NULL, 0);\n assert(dictSize >= 0);\n return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);\n}\n\n\n/*=*************************************************\n* Obsolete Functions\n***************************************************/\n/* obsolete compression functions */\nint LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)\n{\n return LZ4_compress_default(source, dest, inputSize, maxOutputSize);\n}\nint LZ4_compress(const char* src, char* dest, int srcSize)\n{\n return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));\n}\nint LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);\n}\nint LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);\n}\nint LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);\n}\nint LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)\n{\n return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);\n}\n\n/*\nThese decompression functions are deprecated and should no longer be 
used.\nThey are only provided here for compatibility with older user programs.\n- LZ4_uncompress is totally equivalent to LZ4_decompress_fast\n- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe\n*/\nint LZ4_uncompress (const char* source, char* dest, int outputSize)\n{\n return LZ4_decompress_fast(source, dest, outputSize);\n}\nint LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)\n{\n return LZ4_decompress_safe(source, dest, isize, maxOutputSize);\n}\n\n/* Obsolete Streaming functions */\n\nint LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }\n\nint LZ4_resetStreamState(void* state, char* inputBuffer)\n{\n (void)inputBuffer;\n LZ4_resetStream((LZ4_stream_t*)state);\n return 0;\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nvoid* LZ4_create (char* inputBuffer)\n{\n (void)inputBuffer;\n return LZ4_createStream();\n}\n#endif\n\nchar* LZ4_slideInputBuffer (void* state)\n{\n /* avoid const char * -> char * conversion warning */\n return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;\n}\n\n#endif /* LZ4_COMMONDEFS_ONLY */\n"], ["/pogocache/src/hashmap.c", "// Copyright 2020 Joshua J Baker. All rights reserved.\n// Use of this source code is governed by an MIT-style\n// license that can be found in the LICENSE file.\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\n#define GROW_AT 0.60 /* 60% */\n#define SHRINK_AT 0.10 /* 10% */\n\n#ifndef HASHMAP_LOAD_FACTOR\n#define HASHMAP_LOAD_FACTOR GROW_AT\n#endif\n\nstatic void *(*__malloc)(size_t) = NULL;\nstatic void *(*__realloc)(void *, size_t) = NULL;\nstatic void (*__free)(void *) = NULL;\n\n// hashmap_set_allocator allows for configuring a custom allocator for\n// all hashmap library operations. 
This function, if needed, should be called\n// only once at startup and a prior to calling hashmap_new().\nvoid hashmap_set_allocator(void *(*malloc)(size_t), void (*free)(void*)) {\n __malloc = malloc;\n __free = free;\n}\n\nstruct bucket {\n uint64_t hash:48;\n uint64_t dib:16;\n};\n\n// hashmap is an open addressed hash map using robinhood hashing.\nstruct hashmap {\n void *(*malloc)(size_t);\n void *(*realloc)(void *, size_t);\n void (*free)(void *);\n size_t elsize;\n size_t cap;\n uint64_t seed0;\n uint64_t seed1;\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1);\n int (*compare)(const void *a, const void *b, void *udata);\n void (*elfree)(void *item);\n void *udata;\n size_t bucketsz;\n size_t nbuckets;\n size_t count;\n size_t mask;\n size_t growat;\n size_t shrinkat;\n uint8_t loadfactor;\n uint8_t growpower;\n bool oom;\n void *buckets;\n void *spare;\n void *edata;\n};\n\nvoid hashmap_set_grow_by_power(struct hashmap *map, size_t power) {\n map->growpower = power < 1 ? 1 : power > 16 ? 16 : power;\n}\n\nstatic double clamp_load_factor(double factor, double default_factor) {\n // Check for NaN and clamp between 50% and 90%\n return factor != factor ? default_factor : \n factor < 0.50 ? 0.50 : \n factor > 0.95 ? 
0.95 : \n factor;\n}\n\nvoid hashmap_set_load_factor(struct hashmap *map, double factor) {\n factor = clamp_load_factor(factor, map->loadfactor / 100.0);\n map->loadfactor = factor * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n}\n\nstatic struct bucket *bucket_at0(void *buckets, size_t bucketsz, size_t i) {\n return (struct bucket*)(((char*)buckets)+(bucketsz*i));\n}\n\nstatic struct bucket *bucket_at(struct hashmap *map, size_t index) {\n return bucket_at0(map->buckets, map->bucketsz, index);\n}\n\nstatic void *bucket_item(struct bucket *entry) {\n return ((char*)entry)+sizeof(struct bucket);\n}\n\nstatic uint64_t clip_hash(uint64_t hash) {\n return hash & 0xFFFFFFFFFFFF;\n}\n\nstatic uint64_t get_hash(struct hashmap *map, const void *key) {\n return clip_hash(map->hash(key, map->seed0, map->seed1));\n}\n\n\n// hashmap_new_with_allocator returns a new hash map using a custom allocator.\n// See hashmap_new for more information information\nstruct hashmap *hashmap_new_with_allocator(void *(*_malloc)(size_t), \n void *(*_realloc)(void*, size_t), void (*_free)(void*),\n size_t elsize, size_t cap, uint64_t seed0, uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n _malloc = _malloc ? _malloc : __malloc ? __malloc : malloc;\n _realloc = _realloc ? _realloc : __realloc ? __realloc : realloc;\n _free = _free ? _free : __free ? 
__free : free;\n size_t ncap = 16;\n if (cap < ncap) {\n cap = ncap;\n } else {\n while (ncap < cap) {\n ncap *= 2;\n }\n cap = ncap;\n }\n size_t bucketsz = sizeof(struct bucket) + elsize;\n while (bucketsz & (sizeof(uintptr_t)-1)) {\n bucketsz++;\n }\n // hashmap + spare + edata\n size_t size = sizeof(struct hashmap)+bucketsz*2;\n struct hashmap *map = _malloc(size);\n if (!map) {\n return NULL;\n }\n memset(map, 0, sizeof(struct hashmap));\n map->elsize = elsize;\n map->bucketsz = bucketsz;\n map->seed0 = seed0;\n map->seed1 = seed1;\n map->hash = hash;\n map->compare = compare;\n map->elfree = elfree;\n map->udata = udata;\n map->spare = ((char*)map)+sizeof(struct hashmap);\n map->edata = (char*)map->spare+bucketsz;\n map->cap = cap;\n map->nbuckets = cap;\n map->mask = map->nbuckets-1;\n map->buckets = _malloc(map->bucketsz*map->nbuckets);\n if (!map->buckets) {\n _free(map);\n return NULL;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->growpower = 1;\n map->loadfactor = clamp_load_factor(HASHMAP_LOAD_FACTOR, GROW_AT) * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n map->shrinkat = map->nbuckets * SHRINK_AT;\n map->malloc = _malloc;\n map->realloc = _realloc;\n map->free = _free;\n return map; \n}\n\n// hashmap_new returns a new hash map. \n// Param `elsize` is the size of each element in the tree. Every element that\n// is inserted, deleted, or retrieved will be this size.\n// Param `cap` is the default lower capacity of the hashmap. Setting this to\n// zero will default to 16.\n// Params `seed0` and `seed1` are optional seed values that are passed to the \n// following `hash` function. These can be any value you wish but it's often \n// best to use randomly generated values.\n// Param `hash` is a function that generates a hash value for an item. It's\n// important that you provide a good hash function, otherwise it will perform\n// poorly or be vulnerable to Denial-of-service attacks. 
This implementation\n// comes with two helper functions `hashmap_sip()` and `hashmap_murmur()`.\n// Param `compare` is a function that compares items in the tree. See the \n// qsort stdlib function for an example of how this function works.\n// The hashmap must be freed with hashmap_free(). \n// Param `elfree` is a function that frees a specific item. This should be NULL\n// unless you're storing some kind of reference data in the hash.\nstruct hashmap *hashmap_new(size_t elsize, size_t cap, uint64_t seed0, \n uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n return hashmap_new_with_allocator(NULL, NULL, NULL, elsize, cap, seed0, \n seed1, hash, compare, elfree, udata);\n}\n\nstatic void free_elements(struct hashmap *map) {\n if (map->elfree) {\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib) map->elfree(bucket_item(bucket));\n }\n }\n}\n\n// hashmap_clear quickly clears the map. \n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\n// When the update_cap is provided, the map's capacity will be updated to match\n// the currently number of allocated buckets. 
This is an optimization to ensure\n// that this operation does not perform any allocations.\nvoid hashmap_clear(struct hashmap *map, bool update_cap) {\n map->count = 0;\n free_elements(map);\n if (update_cap) {\n map->cap = map->nbuckets;\n } else if (map->nbuckets != map->cap) {\n void *new_buckets = map->malloc(map->bucketsz*map->cap);\n if (new_buckets) {\n map->free(map->buckets);\n map->buckets = new_buckets;\n }\n map->nbuckets = map->cap;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * (map->loadfactor / 100.0) ;\n map->shrinkat = map->nbuckets * SHRINK_AT;\n}\n\nstatic bool resize0(struct hashmap *map, size_t new_cap) {\n struct hashmap *map2 = hashmap_new_with_allocator(map->malloc, map->realloc, \n map->free, map->elsize, new_cap, map->seed0, map->seed1, map->hash, \n map->compare, map->elfree, map->udata);\n if (!map2) return false;\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *entry = bucket_at(map, i);\n if (!entry->dib) {\n continue;\n }\n entry->dib = 1;\n size_t j = entry->hash & map2->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map2, j);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n break;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map2->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map2->spare, map->bucketsz);\n }\n j = (j + 1) & map2->mask;\n entry->dib += 1;\n }\n }\n map->free(map->buckets);\n map->buckets = map2->buckets;\n map->nbuckets = map2->nbuckets;\n map->mask = map2->mask;\n map->growat = map2->growat;\n map->shrinkat = map2->shrinkat;\n map->free(map2);\n return true;\n}\n\nstatic bool resize(struct hashmap *map, size_t new_cap) {\n return resize0(map, new_cap);\n}\n\n// hashmap_set_with_hash works like hashmap_set but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_set_with_hash(struct hashmap *map, const void *item,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*(1<growpower))) {\n map->oom = true;\n return NULL;\n }\n }\n\n struct bucket *entry = map->edata;\n entry->hash = hash;\n entry->dib = 1;\n void *eitem = bucket_item(entry);\n memcpy(eitem, item, map->elsize);\n\n void *bitem;\n size_t i = entry->hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n map->count++;\n return NULL;\n }\n bitem = bucket_item(bucket);\n if (entry->hash == bucket->hash && (!map->compare ||\n map->compare(eitem, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n memcpy(bitem, eitem, map->elsize);\n return map->spare;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map->spare, map->bucketsz);\n eitem = bucket_item(entry);\n }\n i = (i + 1) & map->mask;\n entry->dib += 1;\n }\n}\n\n// hashmap_set inserts or replaces an item in the hash map. If an item is\n// replaced then it is returned otherwise NULL is returned. This operation\n// may allocate memory. If the system is unable to allocate additional\n// memory then NULL is returned and hashmap_oom() returns true.\nconst void *hashmap_set(struct hashmap *map, const void *item) {\n return hashmap_set_with_hash(map, item, get_hash(map, item));\n}\n\n// hashmap_get_with_hash works like hashmap_get but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_get_with_hash(struct hashmap *map, const void *key, \n uint64_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) return NULL;\n if (bucket->hash == hash) {\n void *bitem = bucket_item(bucket);\n if (!map->compare || map->compare(key, bitem, map->udata) == 0) {\n return bitem;\n }\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_get returns the item based on the provided key. If the item is not\n// found then NULL is returned.\nconst void *hashmap_get(struct hashmap *map, const void *key) {\n return hashmap_get_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_probe returns the item in the bucket at position or NULL if an item\n// is not set for that bucket. The position is 'moduloed' by the number of \n// buckets in the hashmap.\nconst void *hashmap_probe(struct hashmap *map, uint64_t position) {\n size_t i = position & map->mask;\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n return bucket_item(bucket);\n}\n\n// hashmap_delete_with_hash works like hashmap_delete but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_delete_with_hash(struct hashmap *map, const void *key,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n void *bitem = bucket_item(bucket);\n if (bucket->hash == hash && (!map->compare ||\n map->compare(key, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n bucket->dib = 0;\n while(1) {\n struct bucket *prev = bucket;\n i = (i + 1) & map->mask;\n bucket = bucket_at(map, i);\n if (bucket->dib <= 1) {\n prev->dib = 0;\n break;\n }\n memcpy(prev, bucket, map->bucketsz);\n prev->dib--;\n }\n map->count--;\n if (map->nbuckets > map->cap && map->count <= map->shrinkat) {\n // Ignore the return value. It's ok for the resize operation to\n // fail to allocate enough memory because a shrink operation\n // does not change the integrity of the data.\n resize(map, map->nbuckets/2);\n }\n return map->spare;\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_delete removes an item from the hash map and returns it. 
If the\n// item is not found then NULL is returned.\nconst void *hashmap_delete(struct hashmap *map, const void *key) {\n return hashmap_delete_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_count returns the number of items in the hash map.\nsize_t hashmap_count(struct hashmap *map) {\n return map->count;\n}\n\n// hashmap_free frees the hash map\n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\nvoid hashmap_free(struct hashmap *map) {\n if (!map) return;\n free_elements(map);\n map->free(map->buckets);\n map->free(map);\n}\n\n// hashmap_oom returns true if the last hashmap_set() call failed due to the \n// system being out of memory.\nbool hashmap_oom(struct hashmap *map) {\n return map->oom;\n}\n\n// hashmap_scan iterates over all items in the hash map\n// Param `iter` can return false to stop iteration early.\n// Returns false if the iteration has been stopped early.\nbool hashmap_scan(struct hashmap *map, \n bool (*iter)(const void *item, void *udata), void *udata)\n{\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib && !iter(bucket_item(bucket), udata)) {\n return false;\n }\n }\n return true;\n}\n\n// hashmap_iter iterates one key at a time yielding a reference to an\n// entry at each iteration. Useful to write simple loops and avoid writing\n// dedicated callbacks and udata structures, as in hashmap_scan.\n//\n// map is a hash map handle. i is a pointer to a size_t cursor that\n// should be initialized to 0 at the beginning of the loop. item is a void\n// pointer pointer that is populated with the retrieved item. 
Note that this\n// is NOT a copy of the item stored in the hash map and can be directly\n// modified.\n//\n// Note that if hashmap_delete() is called on the hashmap being iterated,\n// the buckets are rearranged and the iterator must be reset to 0, otherwise\n// unexpected results may be returned after deletion.\n//\n// This function has not been tested for thread safety.\n//\n// The function returns true if an item was retrieved; false if the end of the\n// iteration has been reached.\nbool hashmap_iter(struct hashmap *map, size_t *i, void **item) {\n struct bucket *bucket;\n do {\n if (*i >= map->nbuckets) return false;\n bucket = bucket_at(map, *i);\n (*i)++;\n } while (!bucket->dib);\n *item = bucket_item(bucket);\n return true;\n}\n\n\n//-----------------------------------------------------------------------------\n// SipHash reference C implementation\n//\n// Copyright (c) 2012-2016 Jean-Philippe Aumasson\n// \n// Copyright (c) 2012-2014 Daniel J. Bernstein \n//\n// To the extent possible under law, the author(s) have dedicated all copyright\n// and related and neighboring rights to this software to the public domain\n// worldwide. This software is distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along\n// with this software. 
If not, see\n// .\n//\n// default: SipHash-2-4\n//-----------------------------------------------------------------------------\nstatic uint64_t SIP64(const uint8_t *in, const size_t inlen, uint64_t seed0,\n uint64_t seed1) \n{\n#define U8TO64_LE(p) \\\n { (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \\\n ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \\\n ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \\\n ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) }\n#define U64TO8_LE(p, v) \\\n { U32TO8_LE((p), (uint32_t)((v))); \\\n U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); }\n#define U32TO8_LE(p, v) \\\n { (p)[0] = (uint8_t)((v)); \\\n (p)[1] = (uint8_t)((v) >> 8); \\\n (p)[2] = (uint8_t)((v) >> 16); \\\n (p)[3] = (uint8_t)((v) >> 24); }\n#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))\n#define SIPROUND \\\n { v0 += v1; v1 = ROTL(v1, 13); \\\n v1 ^= v0; v0 = ROTL(v0, 32); \\\n v2 += v3; v3 = ROTL(v3, 16); \\\n v3 ^= v2; \\\n v0 += v3; v3 = ROTL(v3, 21); \\\n v3 ^= v0; \\\n v2 += v1; v1 = ROTL(v1, 17); \\\n v1 ^= v2; v2 = ROTL(v2, 32); }\n uint64_t k0 = U8TO64_LE((uint8_t*)&seed0);\n uint64_t k1 = U8TO64_LE((uint8_t*)&seed1);\n uint64_t v3 = UINT64_C(0x7465646279746573) ^ k1;\n uint64_t v2 = UINT64_C(0x6c7967656e657261) ^ k0;\n uint64_t v1 = UINT64_C(0x646f72616e646f6d) ^ k1;\n uint64_t v0 = UINT64_C(0x736f6d6570736575) ^ k0;\n const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));\n for (; in != end; in += 8) {\n uint64_t m = U8TO64_LE(in);\n v3 ^= m;\n SIPROUND; SIPROUND;\n v0 ^= m;\n }\n const int left = inlen & 7;\n uint64_t b = ((uint64_t)inlen) << 56;\n switch (left) {\n case 7: b |= ((uint64_t)in[6]) << 48; /* fall through */\n case 6: b |= ((uint64_t)in[5]) << 40; /* fall through */\n case 5: b |= ((uint64_t)in[4]) << 32; /* fall through */\n case 4: b |= ((uint64_t)in[3]) << 24; /* fall through */\n case 3: b |= ((uint64_t)in[2]) << 16; /* fall through */\n case 2: b |= ((uint64_t)in[1]) << 8; /* fall 
through */\n case 1: b |= ((uint64_t)in[0]); break;\n case 0: break;\n }\n v3 ^= b;\n SIPROUND; SIPROUND;\n v0 ^= b;\n v2 ^= 0xff;\n SIPROUND; SIPROUND; SIPROUND; SIPROUND;\n b = v0 ^ v1 ^ v2 ^ v3;\n uint64_t out = 0;\n U64TO8_LE((uint8_t*)&out, b);\n return out;\n}\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n//\n// Murmur3_86_128\n//-----------------------------------------------------------------------------\nstatic uint64_t MM86128(const void *key, const int len, uint32_t seed) {\n#define\tROTL32(x, r) ((x << r) | (x >> (32 - r)))\n#define FMIX32(h) h^=h>>16; h*=0x85ebca6b; h^=h>>13; h*=0xc2b2ae35; h^=h>>16;\n const uint8_t * data = (const uint8_t*)key;\n const int nblocks = len / 16;\n uint32_t h1 = seed;\n uint32_t h2 = seed;\n uint32_t h3 = seed;\n uint32_t h4 = seed;\n uint32_t c1 = 0x239b961b; \n uint32_t c2 = 0xab0e9789;\n uint32_t c3 = 0x38b34ae5; \n uint32_t c4 = 0xa1e38b93;\n const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n for (int i = -nblocks; i; i++) {\n uint32_t k1 = blocks[i*4+0];\n uint32_t k2 = blocks[i*4+1];\n uint32_t k3 = blocks[i*4+2];\n uint32_t k4 = blocks[i*4+3];\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n }\n const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n uint32_t k1 = 0;\n uint32_t k2 = 0;\n uint32_t k3 = 0;\n uint32_t k4 = 0;\n switch(len & 15) {\n case 15: k4 ^= tail[14] << 16; /* fall through */\n case 14: k4 ^= tail[13] << 8; /* fall through 
*/\n case 13: k4 ^= tail[12] << 0;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n /* fall through */\n case 12: k3 ^= tail[11] << 24; /* fall through */\n case 11: k3 ^= tail[10] << 16; /* fall through */\n case 10: k3 ^= tail[ 9] << 8; /* fall through */\n case 9: k3 ^= tail[ 8] << 0;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n /* fall through */\n case 8: k2 ^= tail[ 7] << 24; /* fall through */\n case 7: k2 ^= tail[ 6] << 16; /* fall through */\n case 6: k2 ^= tail[ 5] << 8; /* fall through */\n case 5: k2 ^= tail[ 4] << 0;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n /* fall through */\n case 4: k1 ^= tail[ 3] << 24; /* fall through */\n case 3: k1 ^= tail[ 2] << 16; /* fall through */\n case 2: k1 ^= tail[ 1] << 8; /* fall through */\n case 1: k1 ^= tail[ 0] << 0;\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n /* fall through */\n };\n h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n FMIX32(h1); FMIX32(h2); FMIX32(h3); FMIX32(h4);\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n return (((uint64_t)h2)<<32)|h1;\n}\n\n//-----------------------------------------------------------------------------\n// xxHash Library\n// Copyright (c) 2012-2021 Yann Collet\n// All rights reserved.\n// \n// BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)\n//\n// xxHash3\n//-----------------------------------------------------------------------------\n#define XXH_PRIME_1 11400714785074694791ULL\n#define XXH_PRIME_2 14029467366897019727ULL\n#define XXH_PRIME_3 1609587929392839161ULL\n#define XXH_PRIME_4 9650029242287828579ULL\n#define XXH_PRIME_5 2870177450012600261ULL\n\nstatic uint64_t XXH_read64(const void* memptr) {\n uint64_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint32_t XXH_read32(const void* memptr) {\n uint32_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint64_t XXH_rotl64(uint64_t 
x, int r) {\n return (x << r) | (x >> (64 - r));\n}\n\nstatic uint64_t xxh3(const void* data, size_t len, uint64_t seed) {\n const uint8_t* p = (const uint8_t*)data;\n const uint8_t* const end = p + len;\n uint64_t h64;\n\n if (len >= 32) {\n const uint8_t* const limit = end - 32;\n uint64_t v1 = seed + XXH_PRIME_1 + XXH_PRIME_2;\n uint64_t v2 = seed + XXH_PRIME_2;\n uint64_t v3 = seed + 0;\n uint64_t v4 = seed - XXH_PRIME_1;\n\n do {\n v1 += XXH_read64(p) * XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n\n v2 += XXH_read64(p + 8) * XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n\n v3 += XXH_read64(p + 16) * XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n\n v4 += XXH_read64(p + 24) * XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n\n p += 32;\n } while (p <= limit);\n\n h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + \n XXH_rotl64(v4, 18);\n\n v1 *= XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n h64 ^= v1;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v2 *= XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n h64 ^= v2;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v3 *= XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n h64 ^= v3;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v4 *= XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n h64 ^= v4;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n }\n else {\n h64 = seed + XXH_PRIME_5;\n }\n\n h64 += (uint64_t)len;\n\n while (p + 8 <= end) {\n uint64_t k1 = XXH_read64(p);\n k1 *= XXH_PRIME_2;\n k1 = XXH_rotl64(k1, 31);\n k1 *= XXH_PRIME_1;\n h64 ^= k1;\n h64 = XXH_rotl64(h64, 27) * XXH_PRIME_1 + XXH_PRIME_4;\n p += 8;\n }\n\n if (p + 4 <= end) {\n h64 ^= (uint64_t)(XXH_read32(p)) * XXH_PRIME_1;\n h64 = XXH_rotl64(h64, 23) * XXH_PRIME_2 + XXH_PRIME_3;\n p += 4;\n }\n\n while (p < end) {\n h64 ^= (*p) * XXH_PRIME_5;\n h64 = XXH_rotl64(h64, 11) * XXH_PRIME_1;\n p++;\n }\n\n h64 ^= h64 >> 33;\n h64 *= 
XXH_PRIME_2;\n h64 ^= h64 >> 29;\n h64 *= XXH_PRIME_3;\n h64 ^= h64 >> 32;\n\n return h64;\n}\n\n// hashmap_sip returns a hash value for `data` using SipHash-2-4.\nuint64_t hashmap_sip(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n return SIP64((uint8_t*)data, len, seed0, seed1);\n}\n\n// hashmap_murmur returns a hash value for `data` using Murmur3_86_128.\nuint64_t hashmap_murmur(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return MM86128(data, len, seed0);\n}\n\nuint64_t hashmap_xxhash3(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return xxh3(data, len ,seed0);\n}\n\n//==============================================================================\n// TESTS AND BENCHMARKS\n// $ cc -DHASHMAP_TEST hashmap.c && ./a.out # run tests\n// $ cc -DHASHMAP_TEST -O3 hashmap.c && BENCH=1 ./a.out # run benchmarks\n//==============================================================================\n#ifdef HASHMAP_TEST\n\nstatic size_t deepcount(struct hashmap *map) {\n size_t count = 0;\n for (size_t i = 0; i < map->nbuckets; i++) {\n if (bucket_at(map, i)->dib) {\n count++;\n }\n }\n return count;\n}\n\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wpedantic\"\n#endif\n#ifdef __clang__\n#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n#pragma GCC diagnostic ignored \"-Wcompound-token-split-by-macro\"\n#pragma GCC diagnostic ignored \"-Wgnu-statement-expression-from-macro-expansion\"\n#endif\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\nstatic bool rand_alloc_fail = false;\nstatic int rand_alloc_fail_odds = 3; // 1 in 3 chance malloc will fail.\nstatic uintptr_t total_allocs = 0;\nstatic uintptr_t total_mem = 0;\n\nstatic void *xmalloc(size_t size) {\n if (rand_alloc_fail && rand()%rand_alloc_fail_odds == 0) {\n return NULL;\n }\n void *mem = 
malloc(sizeof(uintptr_t)+size);\n assert(mem);\n *(uintptr_t*)mem = size;\n total_allocs++;\n total_mem += size;\n return (char*)mem+sizeof(uintptr_t);\n}\n\nstatic void xfree(void *ptr) {\n if (ptr) {\n total_mem -= *(uintptr_t*)((char*)ptr-sizeof(uintptr_t));\n free((char*)ptr-sizeof(uintptr_t));\n total_allocs--;\n }\n}\n\nstatic void shuffle(void *array, size_t numels, size_t elsize) {\n char tmp[elsize];\n char *arr = array;\n for (size_t i = 0; i < numels - 1; i++) {\n int j = i + rand() / (RAND_MAX / (numels - i) + 1);\n memcpy(tmp, arr + j * elsize, elsize);\n memcpy(arr + j * elsize, arr + i * elsize, elsize);\n memcpy(arr + i * elsize, tmp, elsize);\n }\n}\n\nstatic bool iter_ints(const void *item, void *udata) {\n int *vals = *(int**)udata;\n vals[*(int*)item] = 1;\n return true;\n}\n\nstatic int compare_ints_udata(const void *a, const void *b, void *udata) {\n return *(int*)a - *(int*)b;\n}\n\nstatic int compare_strs(const void *a, const void *b, void *udata) {\n return strcmp(*(char**)a, *(char**)b);\n}\n\nstatic uint64_t hash_int(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(item, sizeof(int), seed0, seed1);\n // return hashmap_sip(item, sizeof(int), seed0, seed1);\n // return hashmap_murmur(item, sizeof(int), seed0, seed1);\n}\n\nstatic uint64_t hash_str(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_sip(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_murmur(*(char**)item, strlen(*(char**)item), seed0, seed1);\n}\n\nstatic void free_str(void *item) {\n xfree(*(char**)item);\n}\n\nstatic void all(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):2000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n rand_alloc_fail = true;\n\n // test sip and murmur hashes\n assert(hashmap_sip(\"hello\", 
5, 1, 2) == 2957200328589801622);\n assert(hashmap_murmur(\"hello\", 5, 1, 2) == 1682575153221130884);\n assert(hashmap_xxhash3(\"hello\", 5, 1, 2) == 2584346877953614258);\n\n int *vals;\n while (!(vals = xmalloc(N * sizeof(int)))) {}\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n struct hashmap *map;\n\n while (!(map = hashmap_new(sizeof(int), 0, seed, seed, \n hash_int, compare_ints_udata, NULL, NULL))) {}\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n // // printf(\"== %d ==\\n\", vals[i]);\n assert(map->count == (size_t)i);\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n const int *v;\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n \n for (int j = 0; j < i; j++) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n while (true) {\n v = hashmap_set(map, &vals[i]);\n if (!v) {\n assert(hashmap_oom(map));\n continue;\n } else {\n assert(!hashmap_oom(map));\n assert(v && *v == vals[i]);\n break;\n }\n }\n v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n assert(!hashmap_set(map, &vals[i]));\n assert(map->count == (size_t)(i+1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n }\n\n int *vals2;\n while (!(vals2 = xmalloc(N * sizeof(int)))) {}\n memset(vals2, 0, N * sizeof(int));\n assert(hashmap_scan(map, iter_ints, &vals2));\n\n // Test hashmap_iter. 
This does the same as hashmap_scan above.\n size_t iter = 0;\n void *iter_val;\n while (hashmap_iter (map, &iter, &iter_val)) {\n assert (iter_ints(iter_val, &vals2));\n }\n for (int i = 0; i < N; i++) {\n assert(vals2[i] == 1);\n }\n xfree(vals2);\n\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n const int *v;\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(map->count == (size_t)(N-i-1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n for (int j = N-1; j > i; j--) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n }\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n assert(map->count != 0);\n size_t prev_cap = map->cap;\n hashmap_clear(map, true);\n assert(prev_cap < map->cap);\n assert(map->count == 0);\n\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n prev_cap = map->cap;\n hashmap_clear(map, false);\n assert(prev_cap == map->cap);\n\n hashmap_free(map);\n\n xfree(vals);\n\n\n while (!(map = hashmap_new(sizeof(char*), 0, seed, seed,\n hash_str, compare_strs, free_str, NULL)));\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_clear(map, false);\n assert(hashmap_count(map) == 0);\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_free(map);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n#define bench(name, N, code) {{ \\\n if (strlen(name) > 0) { \\\n printf(\"%-14s \", name); \\\n } \\\n size_t tmem = total_mem; \\\n size_t tallocs = 
total_allocs; \\\n uint64_t bytes = 0; \\\n clock_t begin = clock(); \\\n for (int i = 0; i < N; i++) { \\\n (code); \\\n } \\\n clock_t end = clock(); \\\n double elapsed_secs = (double)(end - begin) / CLOCKS_PER_SEC; \\\n double bytes_sec = (double)bytes/elapsed_secs; \\\n printf(\"%d ops in %.3f secs, %.0f ns/op, %.0f op/sec\", \\\n N, elapsed_secs, \\\n elapsed_secs/(double)N*1e9, \\\n (double)N/elapsed_secs \\\n ); \\\n if (bytes > 0) { \\\n printf(\", %.1f GB/sec\", bytes_sec/1024/1024/1024); \\\n } \\\n if (total_mem > tmem) { \\\n size_t used_mem = total_mem-tmem; \\\n printf(\", %.2f bytes/op\", (double)used_mem/N); \\\n } \\\n if (total_allocs > tallocs) { \\\n size_t used_allocs = total_allocs-tallocs; \\\n printf(\", %.2f allocs/op\", (double)used_allocs/N); \\\n } \\\n printf(\"\\n\"); \\\n}}\n\nstatic void benchmarks(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):5000000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n\n int *vals = xmalloc(N * sizeof(int));\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n shuffle(vals, N, sizeof(int));\n\n struct hashmap *map;\n shuffle(vals, N, sizeof(int));\n\n map = hashmap_new(sizeof(int), 0, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get\", N, {\n const int *v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete\", N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n hashmap_free(map);\n\n map = hashmap_new(sizeof(int), N, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set (cap)\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get (cap)\", N, {\n const int *v = 
hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete (cap)\" , N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n\n hashmap_free(map);\n\n \n xfree(vals);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\nint main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}\n\n\n#endif\n\n\n"], ["/pogocache/src/util.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit util.c provides various utilities and convenience functions.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n\n// Performs a case-insenstive equality test between the byte slice 'data' and\n// a c-string. It's expected that c-string is already lowercase and \n// null-terminated. 
The data does not need to be null-terminated.\nbool argeq_bytes(const void *data, size_t datalen, const char *cstr) {\n const char *p = data;\n const char *e = p+datalen;\n bool eq = true;\n while (eq && p < e && *cstr) {\n eq = tolower(*p) == *cstr;\n p++;\n cstr++;\n }\n return eq && *cstr == '\\0' && p == e;\n}\n\nbool argeq(struct args *args, int idx, const char *cstr) {\n return argeq_bytes(args->bufs[idx].data, args->bufs[idx].len, cstr);\n}\n\n// Safely adds two int64_t values and with clamping on overflow.\nint64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n// Safely multiplies two int64_t values and with clamping on overflow.\nint64_t int64_mul_clamp(int64_t a, int64_t b) {\n if (a || b) {\n if (a > 0) {\n if (b > 0 && a > INT64_MAX / b) {\n return INT64_MAX;\n } else if (b < 0 && b < INT64_MIN / a) {\n return INT64_MIN;\n }\n } else {\n if (b > 0 && a < INT64_MIN / b) {\n return INT64_MIN;\n } else if (b < 0 && a < INT64_MAX / b) {\n return INT64_MAX;\n }\n }\n }\n return a * b;\n}\n\n/// https://github.com/tidwall/varint.c\nint varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nint varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\nint varint_write_i64(void *data, int64_t x) {\n uint64_t ux = (uint64_t)x << 1;\n ux = x < 0 ? 
~ux : ux;\n return varint_write_u64(data, ux);\n}\n\nint varint_read_i64(const void *data, size_t len, int64_t *x) {\n uint64_t ux;\n int n = varint_read_u64(data, len, &ux);\n *x = (int64_t)(ux >> 1);\n *x = ux&1 ? ~*x : *x;\n return n;\n}\n\n\nconst char *memstr(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0fB\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fK\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fM\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1fG\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0G\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0M\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0K\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\nconst char *memstr_long(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0f bytes\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f KB\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f MB\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1f GB\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0 GB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 MB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 KB\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nuint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\nuint64_t rand_next(uint64_t *seed) {\n // pcg + mix13\n *seed = (*seed * UINT64_C(6364136223846793005)) + 1;\n return mix13(*seed);\n}\n\nvoid write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = 
(x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n bytes[4] = (x>>32)&0xFF;\n bytes[5] = (x>>40)&0xFF;\n bytes[6] = (x>>48)&0xFF;\n bytes[7] = (x>>56)&0xFF;\n}\n\nuint64_t read_u64(const void *data) {\n const uint8_t *bytes = data;\n uint64_t x = 0;\n x |= ((uint64_t)bytes[0])<<0;\n x |= ((uint64_t)bytes[1])<<8;\n x |= ((uint64_t)bytes[2])<<16;\n x |= ((uint64_t)bytes[3])<<24;\n x |= ((uint64_t)bytes[4])<<32;\n x |= ((uint64_t)bytes[5])<<40;\n x |= ((uint64_t)bytes[6])<<48;\n x |= ((uint64_t)bytes[7])<<56;\n return x;\n}\n\nvoid write_u32(void *data, uint32_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n}\n\nuint32_t read_u32(const void *data) {\n const uint8_t *bytes = data;\n uint32_t x = 0;\n x |= ((uint32_t)bytes[0])<<0;\n x |= ((uint32_t)bytes[1])<<8;\n x |= ((uint32_t)bytes[2])<<16;\n x |= ((uint32_t)bytes[3])<<24;\n return x;\n}\n\n// https://www.w3.org/TR/2003/REC-PNG-20031110/#D-CRCAppendix\nuint32_t crc32(const void *data, size_t len) {\n static __thread uint32_t table[256];\n static __thread bool computed = false;\n if (!computed) {\n for (uint32_t n = 0; n < 256; n++) {\n uint32_t c = n;\n for (int k = 0; k < 8; k++) {\n c = (c&1)?0xedb88320L^(c>>1):c>>1;\n }\n table[n] = c;\n }\n computed = true;\n }\n uint32_t crc = ~0;\n const uint8_t *buf = data;\n for (size_t n = 0; n < len; n++) {\n crc = table[(crc^buf[n])&0xff]^(crc>>8);\n }\n return ~crc;\n}\n\n// Attempts to read exactly len bytes from file stream\n// Returns the number of bytes read. 
Anything less than len means the stream\n// was closed or an error occured while reading.\n// Return -1 if no bytes were read and there was an error.\nssize_t read_full(int fd, void *data, size_t len) {\n uint8_t *bytes = data;\n size_t total = 0;\n while (len > 0) {\n ssize_t n = read(fd, bytes+total, len);\n if (n <= 0) {\n if (total > 0) {\n break;\n }\n return n;\n }\n len -= n;\n total += n;\n }\n return total;\n}\n\nsize_t u64toa(uint64_t x, uint8_t *data) {\n if (x < 10) {\n data[0] = '0'+x;\n return 1;\n }\n size_t i = 0;\n do {\n data[i++] = '0' + x % 10;\n } while ((x /= 10) > 0);\n // reverse the characters\n for (size_t j = 0, k = i-1; j < k; j++, k--) {\n uint8_t ch = data[j];\n data[j] = data[k];\n data[k] = ch;\n }\n return i;\n}\n\nsize_t i64toa(int64_t x, uint8_t *data) {\n if (x < 0) {\n data[0] = '-';\n data++;\n return u64toa(x * -1, data) + 1;\n }\n return u64toa(x, data);\n}\n\nuint32_t fnv1a_case(const char* buf, size_t len) {\n uint32_t hash = 0x811c9dc5;\n for (size_t i = 0; i < len; i++) {\n hash = (hash ^ tolower(buf[i])) * 0x01000193;\n }\n\treturn hash;\n}\n\nbool parse_i64(const char *data, size_t len, int64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n errno = 0;\n char *end;\n *x = strtoll(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool parse_u64(const char *data, size_t len, uint64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n if (buf[0] == '-') {\n return false;\n }\n errno = 0;\n char *end;\n *x = strtoull(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool argi64(struct args *args, int idx, int64_t *x) {\n return parse_i64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nbool argu64(struct args *args, int idx, uint64_t *x) {\n return parse_u64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nvoid *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t 
uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\nvoid store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// Increment a morris counter. The counter is clipped to 31 bits\nuint8_t morris_incr(uint8_t morris, uint64_t rand) {\n return morris>=31?31:morris+!(rand&((UINT64_C(1)< '~') {\n printf(\"\\\\x%02x\", c);\n } else {\n printf(\"%c\", c);\n }\n }\n}\n"], ["/pogocache/src/sys.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit sys.c provides various system-level functions.\n#if __linux__\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __APPLE__\n#include \n#include \n#endif\n#include \"sys.h\"\n\nint sys_nprocs(void) {\n static atomic_int nprocsa = 0;\n int nprocs = atomic_load_explicit(&nprocsa, __ATOMIC_RELAXED);\n if (nprocs > 0) {\n return nprocs;\n }\n int logical = sysconf(_SC_NPROCESSORS_CONF);\n logical = logical < 1 ? 1 : logical;\n int physical = logical;\n int affinity = physical;\n#ifdef __linux__\n affinity = 0;\n cpu_set_t mask;\n CPU_ZERO(&mask);\n if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {\n perror(\"sched_getaffinity\");\n return 1;\n }\n for (int i = 0; i < CPU_SETSIZE; i++) {\n if (CPU_ISSET(i, &mask)) {\n affinity++;\n }\n }\n double hyper = ceil((double)logical / (double)physical);\n hyper = hyper < 1 ? 1 : hyper;\n affinity /= hyper;\n#endif\n nprocs = affinity;\n nprocs = nprocs < 1 ? 
1 : nprocs;\n atomic_store_explicit(&nprocsa, nprocs, __ATOMIC_RELAXED);\n return nprocs;\n}\n\n#ifndef __linux__\n#include \n#endif\n\nsize_t sys_memory(void) {\n size_t sysmem = 0;\n#ifdef __linux__\n FILE *f = fopen(\"/proc/meminfo\", \"rb\");\n if (f) {\n char buf[4096];\n size_t n = fread(buf, 1, sizeof(buf)-1, f);\n buf[n] = '\\0';\n char *s = 0;\n char *e = 0;\n s = strstr(buf, \"MemTotal\");\n if (s) s = strstr(s, \": \");\n if (s) e = strstr(s, \"\\n\");\n if (e) {\n *e = '\\0';\n s += 2;\n while (isspace(*s)) s++;\n if (strstr(s, \" kB\")) {\n s[strstr(s, \" kB\")-s] = '\\0';\n }\n errno = 0;\n char *end;\n int64_t isysmem = strtoll(s, &end, 10);\n assert(errno == 0 && isysmem > 0);\n isysmem *= 1024;\n sysmem = isysmem;\n }\n fclose(f);\n }\n#else\n size_t memsize = 0;\n size_t len = sizeof(memsize);\n if (sysctlbyname(\"hw.memsize\", &memsize, &len, 0, 0) == 0) {\n sysmem = memsize;\n }\n#endif\n if (sysmem == 0) {\n fprintf(stderr, \"# could not detect total system memory, bailing\\n\");\n exit(1);\n }\n return sysmem;\n}\n\nuint64_t sys_seed(void) {\n #define NSEEDCAP 64\n static __thread int nseeds = 0;\n static __thread uint64_t seeds[NSEEDCAP];\n if (nseeds == 0) {\n // Generate a group of new seeds\n FILE *f = fopen(\"/dev/urandom\", \"rb+\");\n if (!f) {\n perror(\"# /dev/urandom\");\n exit(1);\n }\n size_t n = fread(seeds, 8, NSEEDCAP, f);\n (void)n;\n assert(n == NSEEDCAP);\n fclose(f);\n nseeds = NSEEDCAP;\n }\n return seeds[--nseeds];\n}\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// Return monotonic nanoseconds of the CPU clock.\nint64_t sys_now(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// Return unix timestamp in nanoseconds\nint64_t 
sys_unixnow(void) {\n struct timespec now = { 0 };\n clock_gettime(CLOCK_REALTIME, &now);\n return nanotime(&now);\n}\n\n#ifdef __APPLE__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n task_basic_info_data_t taskInfo;\n mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;\n kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,\n (task_info_t)&taskInfo, &infoCount);\n if (kr != KERN_SUCCESS) {\n fprintf(stderr, \"# task_info: %s\\n\", mach_error_string(kr));\n abort();\n }\n info->virt = taskInfo.virtual_size;\n info->rss = taskInfo.resident_size;\n}\n#elif __linux__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n FILE *f = fopen(\"/proc/self/statm\", \"r\");\n if (!f) {\n perror(\"# open /proc/self/statm\");\n abort();\n }\n unsigned long vm_pages, rss_pages;\n long x = fscanf(f, \"%lu %lu\", &vm_pages, &rss_pages);\n fclose(f);\n if (x != 2) {\n perror(\"# read /proc/self/statm\");\n abort();\n }\n\n // Get the system page size (in bytes)\n size_t page_size = sysconf(_SC_PAGESIZE);\n assert(page_size > 0);\n\n // Convert pages to bytes\n info->virt = vm_pages * page_size;\n info->rss = rss_pages * page_size;\n}\n#endif\n\n#include \n\nconst char *sys_arch(void) {\n static __thread bool got = false;\n static __thread char arch[1024] = \"unknown/error\";\n if (!got) {\n struct utsname unameData;\n if (uname(&unameData) == 0) {\n snprintf(arch, sizeof(arch), \"%s/%s\", unameData.sysname, \n unameData.machine);\n char *p = arch;\n while (*p) {\n *p = tolower(*p);\n p++;\n }\n got = true;\n }\n }\n return arch;\n}\n\nvoid sys_genuseid(char useid[16]) {\n const uint8_t chs[] = \n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789\";\n uint64_t a = sys_seed();\n uint64_t b = sys_seed();\n uint8_t bytes[16];\n memcpy(bytes, &a, 8);\n memcpy(bytes+8, &b, 8);\n for (int i = 0; i < 16; i++) {\n bytes[i] = chs[bytes[i]%62];\n }\n memcpy(useid, bytes, 16);\n}\n\n// Returns a unique thread id for the current thread.\n// This is 
an artificial generated value that is always distinct. \nuint64_t sys_threadid(void) {\n static atomic_int_fast64_t next = 0;\n static __thread uint64_t id = 0;\n if (id == 0) {\n id = atomic_fetch_add_explicit(&next, 1, __ATOMIC_RELEASE);\n }\n return id;\n}\n"], ["/pogocache/src/memcache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit memcache.c provides the parser for the Memcache wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\nstatic __thread size_t mc_n = 0;\n\nsize_t parse_lastmc_n(void) {\n return mc_n;\n}\n\nbool mc_valid_key(struct args *args, int i) {\n const uint8_t *key = (uint8_t*)args->bufs[i].data;\n size_t len = args->bufs[i].len;\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] == 0x7F) {\n return false;\n }\n }\n return true;\n}\n\nenum mc_cmd { MC_UNKNOWN, \n // writers (optional reply)\n MC_SET, MC_ADD, MC_REPLACE, MC_APPEND, MC_PREPEND, MC_CAS, // storage\n MC_INCR, MC_DECR, // increment/decrement\n MC_FLUSH_ALL, MC_DELETE, // deletion\n MC_TOUCH, // touch\n MC_VERBOSITY, // logging\n // readers (always replys)\n MC_GET, MC_GETS, // retreival\n MC_GAT, MC_GATS, // get and touch\n MC_VERSION, MC_STATS, // information\n MC_QUIT, // client\n};\n\nstatic bool is_mc_store_cmd(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_CAS;\n}\n\nstatic bool is_mc_noreplyable(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_VERBOSITY;\n}\n\nstatic ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const 
char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n // if (wn > 0) {\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}\n\nssize_t parse_memcache(const char *data, size_t len, struct args *args, \n bool *noreply)\n{\n ssize_t n = parse_memcache_telnet(data, len, args);\n if (n <= 0 || args->len == 0) {\n return n;\n }\n // args_print(args);\n mc_n = n;\n enum mc_cmd cmd;\n struct args args2 = { 0 };\n *noreply = false;\n // check for common get-2\n if (args->len == 2 && arg_const_eq(args, 0, \"get\")) {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args->bufs[0].data = \"mget\";\n args->bufs[0].len = 4;\n return n;\n }\n // Check for common set-5 (allows for expiry)\n if (args->len == 5 && arg_const_eq(args, 0, \"set\")) {\n if (args->bufs[2].len == 1 && args->bufs[2].data[0] == '0') {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool expset = false;\n int64_t x;\n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n expset = true;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n // replace the \"flags\" with a value\n args->bufs[2].len = value_len;\n 
args->bufs[2].data = (void*)value;\n args->len = 3;\n if (expset) {\n // add the \"ex \" to last two arguments\n args->bufs[4] = args->bufs[3];\n args->bufs[3].data = \"ex\";\n args->bufs[3].len = 2;\n args->len = 5;\n }\n return n;\n } else {\n // flags was set, use plus branch\n cmd = MC_SET;\n goto set_plus;\n }\n }\n // Otherwise use lookup command table. This could be optimized into a\n // switch table or hash table. See cmds.c for hash table example.\n cmd =\n arg_const_eq(args, 0, \"set\") ? MC_SET : // XY\n arg_const_eq(args, 0, \"add\") ? MC_ADD : // XY\n arg_const_eq(args, 0, \"cas\") ? MC_CAS : // XY\n arg_const_eq(args, 0, \"replace\") ? MC_REPLACE : // XY\n arg_const_eq(args, 0, \"get\") ? MC_GET : // XY\n arg_const_eq(args, 0, \"delete\") ? MC_DELETE : // XY\n arg_const_eq(args, 0, \"append\") ? MC_APPEND : // XY\n arg_const_eq(args, 0, \"prepend\") ? MC_PREPEND : // XY\n arg_const_eq(args, 0, \"gets\") ? MC_GETS : // XY\n arg_const_eq(args, 0, \"incr\") ? MC_INCR : // XY\n arg_const_eq(args, 0, \"decr\") ? MC_DECR: // XY\n arg_const_eq(args, 0, \"touch\") ? MC_TOUCH : // X\n arg_const_eq(args, 0, \"gat\") ? MC_GAT : // X\n arg_const_eq(args, 0, \"gats\") ? MC_GATS : // X\n arg_const_eq(args, 0, \"flush_all\") ? MC_FLUSH_ALL : // X\n arg_const_eq(args, 0, \"stats\") ? MC_STATS : // X\n arg_const_eq(args, 0, \"version\") ? MC_VERSION : // X\n arg_const_eq(args, 0, \"quit\") ? MC_QUIT : // XY\n arg_const_eq(args, 0, \"verbosity\") ? 
MC_VERBOSITY : // X\n MC_UNKNOWN;\n if (cmd == MC_UNKNOWN) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (is_mc_noreplyable(cmd)) {\n if (arg_const_eq(args, args->len-1, \"noreply\")) {\n *noreply = true;\n buf_clear(&args->bufs[args->len-1]);\n args->len--;\n }\n }\n if (is_mc_store_cmd(cmd)) {\n // Store commands include 'set', 'add', 'replace', 'append', 'prepend',\n // and 'cas'.\n if ((cmd == MC_CAS && args->len != 6) && \n (cmd != MC_CAS && args->len != 5))\n {\n parse_seterror(\"ERROR\");\n return -1;\n }\n set_plus:\n // check all values before continuing\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n int64_t x;\n if (!argi64(args, 2, &x) || x < 0) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool hascas = false;\n char cas[24] = \"0\";\n if (cmd == MC_CAS) {\n hascas = true;\n uint64_t y;\n if (!argu64(args, 5, &y)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n assert(args->bufs[5].len < sizeof(cas));\n memcpy(cas, args->bufs[5].data, args->bufs[5].len);\n cas[args->bufs[5].len] = '\\0';\n buf_clear(&args->bufs[5]);\n args->len--;\n }\n\n // Storage commands must read a value that follows the first line.\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n\n // Reconstruct the command into a RESP format. 
\n bool is_append_prepend = false;\n switch (cmd) {\n case MC_APPEND:\n args_append(&args2, \"append\", 6, true);\n is_append_prepend = true;\n break;\n case MC_PREPEND:\n args_append(&args2, \"prepend\", 7, true);\n is_append_prepend = true;\n break;\n default:\n args_append(&args2, \"set\", 3, true);\n break;\n }\n // Move key arg to new args\n take_and_append_arg(1);\n // Add value arg\n args_append(&args2, value, value_len, true);\n if (!is_append_prepend) {\n if (!(args->bufs[2].len == 1 && args->bufs[2].data[0] == '0')) {\n args_append(&args2, \"flags\", 5, true);\n take_and_append_arg(2);\n }\n \n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n args_append(&args2, \"ex\", 2, true);\n take_and_append_arg(3);\n }\n if (cmd == MC_ADD) {\n args_append(&args2, \"nx\", 2, true);\n } else if (cmd == MC_REPLACE) {\n args_append(&args2, \"xx\", 2, true);\n }\n if (hascas) {\n args_append(&args2, \"cas\", 3, true);\n args_append(&args2, cas, strlen(cas), false);\n }\n }\n } else if (cmd == MC_GET) {\n // Convert 'get * into 'MGET *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mget\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_DELETE) {\n // Convert 'delete ' into 'DEL '\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (args->len > 2) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"del\", 3, true);\n take_and_append_arg(1);\n } else if (cmd == MC_GETS) {\n // Convert 'gets * into 'MGETS *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; 
i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mgets\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GAT) {\n // Convert 'gat * into 'gat *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gat\", 3, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GATS) {\n // Convert 'gats * into 'gats *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gats\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_STATS) {\n args_append(&args2, \"stats\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_INCR) {\n // Convert 'incr into 'uincrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"uincrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_DECR) {\n // Convert 'decr into 'udecrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n 
return -1;\n }\n args_append(&args2, \"udecrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_TOUCH) {\n // Convert 'touch ' into 'expire '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"expire\", 6, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_FLUSH_ALL) {\n // Convert 'flush_all [delay]' into 'FLUSHALL [DELAY seconds]'\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"flushall\", 8, true);\n if (args->len == 2) {\n args_append(&args2, \"delay\", 5, true);\n take_and_append_arg(1);\n }\n } else if (cmd == MC_QUIT) {\n args_append(&args2, \"quit\", 4, true);\n *noreply = true;\n } else if (cmd == MC_VERSION) {\n args_append(&args2, \"version\", 7, true);\n *noreply = false;\n } else if (cmd == MC_VERBOSITY) {\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"verbosity\", 7, true);\n take_and_append_arg(1);\n } else {\n return -1;\n }\n args_free(args);\n *args = args2;\n return n;\n}\n"], ["/pogocache/src/resp.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit resp.c provides the parser for the RESP wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args) {\n char *err = NULL;\n struct buf arg = { 0 };\n bool inarg = false;\n char quote = '\\0';\n for (size_t i = 0; i < len; i++) {\n char ch = bytes[i];\n if (inarg) {\n if (quote) {\n if (ch == '\\n') {\n goto fail_quotes;\n }\n if (ch == quote) { \n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n inarg = false;\n if (ch == '\\n') {\n i--;\n continue;\n }\n if (!isspace(ch)) {\n goto fail_quotes;\n }\n continue;\n } else if (ch == '\\\\') {\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n switch (ch) {\n case 'n': ch = '\\n'; break;\n case 'r': ch = '\\r'; break;\n case 't': ch = '\\t'; break;\n }\n }\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n } else {\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else if (isspace(ch)) {\n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n if (ch == '\\n') {\n break;\n }\n inarg = false;\n } else {\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n } else {\n if (ch == '\\n') {\n 
buf_clear(&arg);\n return i+1;\n }\n if (isspace(ch)) {\n continue;\n }\n inarg = true;\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else {\n quote = 0;\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n }\n buf_clear(&arg);\n return 0;\nfail_quotes:\n if (!err) err = \"ERR Protocol error: unbalanced quotes in request\";\nfail_nargs:\n if (!err) err = \"ERR Protocol error: invalid multibulk length\";\nfail_argsz:\n if (!err) err = \"ERR Protocol error: invalid bulk length\";\n/* fail: */\n if (err) {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \"%s\", err);\n }\n buf_clear(&arg);\n return -1;\n}\n\nstatic int64_t read_num(const char *data, size_t len, int64_t min, int64_t max,\n bool *ok)\n{\n errno = 0;\n char *end;\n int64_t x = strtoll(data, &end, 10);\n *ok = errno == 0 && (size_t)(end-data) == len && x >= min && x <= max;\n return x;\n}\n\n#define read_resp_num(var, min, max, errmsg) { \\\n char *p = memchr(bytes, '\\r', end-bytes); \\\n if (!p) { \\\n if (end-bytes > 32) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n return 0; \\\n } \\\n if (p+1 == end) { \\\n return 0; \\\n } \\\n if (*(p+1) != '\\n') { \\\n return -1; \\\n } \\\n bool ok; \\\n var = read_num(bytes, p-bytes, min, max, &ok); \\\n if (!ok) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n bytes = p+2; \\\n}\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp(const char *bytes, size_t len, struct args *args) {\n const char *start = bytes;\n const char *end = bytes+len;\n if (bytes == end) {\n return 0;\n }\n if (*(bytes++) != '*') {\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nargs;\n read_resp_num(nargs, LONG_MIN, MAXARGS, \"invalid multibulk length\");\n for (int j = 0; j < nargs; j++) {\n if (bytes == end) {\n 
return 0;\n }\n if (*(bytes++) != '$') {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \n \"ERR Protocol error: expected '$', got '%c'\", *(bytes-1));\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nbytes;\n read_resp_num(nbytes, 0, MAXARGSZ, \"invalid bulk length\");\n if (nbytes+2 > end-bytes) {\n return 0;\n }\n args_append(args, bytes, nbytes, true);\n bytes += nbytes+2;\n }\n return bytes-start;\n}\n\n"], ["/pogocache/src/http.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit http.c provides the parser for the HTTP wire protocol.\n#define _GNU_SOURCE \n#include \n#include \n#include \n#include \n#include \"stats.h\"\n#include \"util.h\"\n#include \"parse.h\"\n\nextern const bool useauth;\nextern const char *auth;\n\nbool http_valid_key(const char *key, size_t len) {\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] >= 0x7F || key[i] == '%' || key[i] == '+' ||\n key[i] == '@' || key[i] == '$' || key[i] == '?' 
|| key[i] == '=') \n {\n return false;\n }\n }\n return true;\n}\n\nssize_t parse_http(const char *data, size_t len, struct args *args, \n int *httpvers, bool *keepalive)\n{\n *keepalive = false;\n *httpvers = 0;\n const char *method = 0;\n size_t methodlen = 0;\n const char *uri = 0;\n size_t urilen = 0;\n int proto = 0;\n const char *hdrname = 0; \n size_t hdrnamelen = 0;\n const char *hdrval = 0;\n size_t hdrvallen = 0;\n size_t bodylen = 0;\n bool nocontentlength = true;\n bool html = false;\n const char *authhdr = 0;\n size_t authhdrlen = 0;\n const char *p = data;\n const char *e = p+len;\n const char *s = p;\n while (p < e) {\n if (*p == ' ') {\n method = s;\n methodlen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == ' ') {\n uri = s;\n urilen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n if (p-s-1 != 8 || !bytes_const_eq(s, 5, \"HTTP/\") || \n s[5] < '0' || s[5] > '9' || s[6] != '.' 
|| \n s[7] < '0' || s[7] > '9')\n {\n goto badproto;\n }\n proto = (s[5]-'0')*10+(s[7]-'0');\n if (proto < 9 || proto >= 30) {\n goto badproto;\n }\n if (proto >= 11) {\n *keepalive = true;\n }\n *httpvers = proto;\n p++;\n goto readhdrs;\n }\n \n p++;\n }\n goto badreq;\nreadhdrs:\n // Parse the headers, pulling the pairs along the way.\n while (p < e) {\n hdrname = p;\n while (p < e) {\n if (*p == ':') {\n hdrnamelen = p-hdrname;\n p++;\n while (p < e && *p == ' ') {\n p++;\n }\n hdrval = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n hdrvallen = p-hdrval-1;\n // printf(\"[%.*s]=[%.*s]\\n\", (int)hdrnamelen, hdrname,\n // (int)hdrvallen, hdrval);\n // We have a new header pair (hdrname, hdrval);\n if (argeq_bytes(hdrname, hdrnamelen, \"content-length\")){\n uint64_t x;\n if (!parse_u64(hdrval, hdrvallen, &x) || \n x > MAXARGSZ)\n {\n stat_store_too_large_incr(0);\n goto badreq;\n }\n bodylen = x;\n nocontentlength = false;\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"connection\"))\n {\n *keepalive = argeq_bytes(hdrval, hdrvallen, \n \"keep-alive\");\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"accept\"))\n {\n if (memmem(hdrval, hdrvallen, \"text/html\", 9) != 0){\n html = true;\n }\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"authorization\"))\n {\n authhdr = hdrval;\n authhdrlen = hdrvallen;\n }\n p++;\n if (p < e && *p == '\\r') {\n p++;\n if (p < e && *p == '\\n') {\n p++;\n } else {\n goto badreq;\n }\n goto readbody;\n }\n break;\n }\n p++;\n }\n break;\n }\n p++;\n }\n }\n return 0;\nreadbody:\n // read the content body\n if ((size_t)(e-p) < bodylen) {\n return 0;\n }\n const char *body = p;\n p = e;\n\n // check\n if (urilen == 0 || uri[0] != '/') {\n goto badreq;\n }\n uri++;\n urilen--;\n const char *ex = 0;\n size_t exlen = 0;\n const char *flags = 0;\n size_t flagslen = 0;\n const char *cas = 0;\n size_t caslen = 0;\n const char *qauth = 0;\n size_t qauthlen = 0;\n bool xx = false;\n bool nx = 
false;\n // Parse the query string, pulling the pairs along the way.\n size_t querylen = 0;\n const char *query = memchr(uri, '?', urilen);\n if (query) {\n querylen = urilen-(query-uri);\n urilen = query-uri;\n query++;\n querylen--;\n const char *qkey;\n size_t qkeylen;\n const char *qval;\n size_t qvallen;\n size_t j = 0;\n size_t k = 0;\n for (size_t i = 0; i < querylen; i++) {\n if (query[i] == '=') {\n k = i;\n i++;\n for (; i < querylen; i++) {\n if (query[i] == '&') {\n break;\n }\n }\n qval = query+k+1;\n qvallen = i-k-1;\n qkeyonly:\n qkey = query+j;\n qkeylen = k-j;\n // We have a new query pair (qkey, qval);\n if (bytes_const_eq(qkey, qkeylen, \"flags\")) {\n flags = qval;\n flagslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"ex\") || \n bytes_const_eq(qkey, qkeylen, \"ttl\"))\n {\n ex = qval;\n exlen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"cas\")) {\n cas = qval;\n caslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"xx\")) {\n xx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"nx\")) {\n nx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"auth\")) {\n qauth = qval;\n qauthlen = qvallen;\n }\n j = i+1;\n } else if (query[i] == '&' || i == querylen-1) {\n qval = 0;\n qvallen = 0;\n if (i == querylen-1) {\n i++;\n }\n k = i;\n goto qkeyonly;\n }\n }\n }\n // The entire HTTP request is complete.\n // Turn request into valid command arguments.\n if (bytes_const_eq(method, methodlen, \"GET\")) {\n if (urilen > 0 && uri[0] == '@') {\n // system command such as @stats or @flushall\n goto badreq;\n } else if (urilen == 0) {\n goto showhelp;\n } else {\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"get\", 3, true);\n args_append(args, uri, urilen, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"PUT\")) {\n if (nocontentlength) {\n // goto badreq;\n }\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n 
args_append(args, \"set\", 3, true);\n args_append(args, uri, urilen, true);\n args_append(args, body, bodylen, true);\n if (cas) {\n args_append(args, \"cas\", 3, true);\n args_append(args, cas, caslen, true);\n }\n if (ex) {\n args_append(args, \"ex\", 2, true);\n args_append(args, ex, exlen, true);\n }\n if (flags) {\n args_append(args, \"flags\", 5, true);\n args_append(args, flags, flagslen, true);\n }\n if (xx) {\n args_append(args, \"xx\", 2, true);\n }\n if (nx) {\n args_append(args, \"nx\", 2, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"DELETE\")) {\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"del\", 3, true);\n args_append(args, uri, urilen, true);\n } else {\n parse_seterror(\"Method Not Allowed\");\n goto badreq;\n }\n\n // Check authorization\n const char *authval = 0;\n size_t authvallen = 0;\n if (qauthlen > 0) {\n authval = qauth;\n authvallen = qauthlen;\n } else if (authhdrlen > 0) {\n if (authhdrlen >= 7 && strncmp(authhdr, \"Bearer \", 7) == 0) {\n authval = authhdr + 7;\n authvallen = authhdrlen - 7;\n } else {\n goto unauthorized;\n }\n }\n if (useauth || authvallen > 0) {\n stat_auth_cmds_incr(0);\n size_t authlen = strlen(auth);\n if (authvallen != authlen || memcmp(auth, authval, authlen) != 0) {\n stat_auth_errors_incr(0);\n goto unauthorized;\n }\n\n }\n return e-data;\nbadreq:\n parse_seterror(\"Bad Request\");\n return -1;\nbadproto:\n parse_seterror(\"Bad Request\");\n return -1;\nbadkey:\n parse_seterror(\"Invalid Key\");\n return -1;\nunauthorized:\n parse_seterror(\"Unauthorized\");\n return -1;\nshowhelp:\n if (html) {\n parse_seterror(\"Show Help HTML\");\n } else {\n parse_seterror(\"Show Help TEXT\");\n }\n return -1;\n}\n"], ["/pogocache/src/parse.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit parse.c provides the entrypoint for parsing all data \n// for incoming client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n\n__thread char parse_lasterr[1024] = \"\";\n\nconst char *parse_lasterror(void) {\n return parse_lasterr;\n}\n\nssize_t parse_resp(const char *bytes, size_t len, struct args *args);\nssize_t parse_memcache(const char *data, size_t len, struct args *args,\n bool *noreply);\nssize_t parse_http(const char *data, size_t len, struct args *args,\n int *httpvers, bool *keepalive);\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args);\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pg);\n\nstatic bool sniff_proto(const char *data, size_t len, int *proto) {\n if (len > 0 && data[0] == '*') {\n *proto = PROTO_RESP;\n return true;\n }\n if (len > 0 && data[0] == '\\0') {\n *proto = PROTO_POSTGRES;\n return true;\n }\n // Parse the first line of text\n size_t n = 0;\n for (size_t i = 0; i < len; i++) {\n if (data[i] == '\\n') {\n n = i+1;\n break;\n }\n }\n // Look for \" HTTP/*.*\\r\\n\" suffix\n if (n >= 11 && memcmp(data+n-11, \" HTTP/\", 5) == 0 && \n data[n-4] == '.' 
&& data[n-2] == '\\r')\n {\n *proto = PROTO_HTTP;\n return true;\n }\n // Trim the prefix, Resp+Telnet and Memcache both allow for spaces between\n // arguments.\n while (*data == ' ') {\n data++;\n n--;\n len--;\n }\n // Treat all uppercase commands as Resp+Telnet\n if (n > 0 && data[0] >= 'A' && data[0] <= 'Z') {\n *proto = PROTO_RESP;\n return true;\n }\n // Look for Memcache commands\n if (n >= 1) {\n *proto = PROTO_MEMCACHE;\n return true;\n }\n // Protocol is unknown\n *proto = 0;\n return false;\n}\n\n// Returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\n// On success, the args and proto will be set to the command arguments and\n// protocol type, respectively.\n//\n// It's required to set proto to 0 for the first command, per client.\n// Then continue to provide the last known proto. \n// This allows for the parser to learn and predict the protocol for ambiguous\n// protocols; like Resp+Telnet, Memcache+Text, HTTP, etc.\n//\n// The noreply param is an output param that is only set when the proto is\n// memcache. The argument is stripped from the args array,\n// but made available to the caller in case it needs to be known.\n//\n// The keepalive param is an output param that is only set when the proto is\n// http. It's used to let the caller know to keep the connection alive for\n// another request.\nssize_t parse_command(const void *data, size_t len, struct args *args, \n int *proto, bool *noreply, int *httpvers, bool *keepalive, struct pg **pg)\n{\n args_clear(args);\n parse_lasterr[0] = '\\0';\n *httpvers = 0;\n *noreply = false;\n *keepalive = false;\n // Sniff for the protocol. 
This should only happen once per client, upon\n // their first request.\n if (*proto == 0) {\n if (!sniff_proto(data, len, proto)) {\n // Unknown protocol\n goto fail;\n }\n if (*proto == 0) {\n // Not enough data to determine yet\n return 0;\n }\n }\n if (*proto == PROTO_RESP) {\n const uint8_t *bytes = data;\n if (bytes[0] == '*') {\n return parse_resp(data, len, args);\n } else {\n return parse_resp_telnet(data, len, args);\n }\n } else if (*proto == PROTO_MEMCACHE) {\n return parse_memcache(data, len, args, noreply);\n } else if (*proto == PROTO_HTTP) {\n return parse_http(data, len, args, httpvers, keepalive);\n } else if (*proto == PROTO_POSTGRES) {\n return parse_postgres(data, len, args, pg);\n }\nfail:\n parse_seterror(\"ERROR\");\n return -1;\n}\n\n"], ["/pogocache/src/args.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit args.c provides functions for managing command arguments\n#include \n#include \n#include \n#include \"args.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n\nconst char *args_at(struct args *args, int idx, size_t *len) {\n *len = args->bufs[idx].len;\n return args->bufs[idx].data;\n}\n\nint args_count(struct args *args) {\n return args->len;\n}\n\nbool args_eq(struct args *args, int index, const char *str) {\n if ((size_t)index >= args->len) {\n return false;\n }\n size_t alen = args->bufs[index].len;\n const char *arg = args->bufs[index].data;\n size_t slen = strlen(str); \n if (alen != slen) {\n return false;\n }\n for (size_t i = 0; i < slen ; i++) {\n if (tolower(str[i]) != tolower(arg[i])) {\n return false;\n }\n }\n return true;\n}\n\nvoid args_append(struct args *args, const char *data, size_t len,\n 
bool zerocopy)\n{\n#ifdef NOZEROCOPY\n zerocopy = 0;\n#endif\n if (args->len == args->cap) {\n args->cap = args->cap == 0 ? 4 : args->cap*2;\n args->bufs = xrealloc(args->bufs, args->cap * sizeof(struct buf));\n memset(&args->bufs[args->len], 0, (args->cap-args->len) * \n sizeof(struct buf));\n }\n if (zerocopy) {\n buf_clear(&args->bufs[args->len]);\n args->bufs[args->len].len = len;\n args->bufs[args->len].data = (char*)data;\n } else {\n args->bufs[args->len].len = 0;\n buf_append(&args->bufs[args->len], data, len);\n }\n if (args->len == 0) {\n args->zerocopy = zerocopy;\n } else {\n args->zerocopy = args->zerocopy && zerocopy;\n }\n args->len++;\n}\n\nvoid args_clear(struct args *args) {\n if (!args->zerocopy) {\n for (size_t i = 0 ; i < args->len; i++) {\n buf_clear(&args->bufs[i]);\n }\n }\n args->len = 0;\n}\n\nvoid args_free(struct args *args) {\n args_clear(args);\n xfree(args->bufs);\n}\n\nvoid args_print(struct args *args) {\n printf(\". \");\n for (size_t i = 0; i < args->len; i++) {\n char *buf = args->bufs[i].data;\n int len = args->bufs[i].len;\n printf(\"[\"); \n binprint(buf, len);\n printf(\"] \");\n }\n printf(\"\\n\");\n}\n\n// remove the first item\nvoid args_remove_first(struct args *args) {\n if (args->len > 0) {\n buf_clear(&args->bufs[0]);\n for (size_t i = 1; i < args->len; i++) {\n args->bufs[i-1] = args->bufs[i];\n }\n args->len--;\n }\n}\n"], ["/pogocache/src/xmalloc.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit xmalloc.c is the primary allocator interface. 
The xmalloc/xfree\n// functions should be used instead of malloc/free.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#if defined(__linux__) && defined(__GLIBC__)\n#include \n#define HAS_MALLOC_H\n#endif\n\n// from main.c\nextern const int useallocator;\nextern const bool usetrackallocs;\n\n#ifdef NOTRACKALLOCS\n#define add_alloc()\n#define sub_alloc()\nsize_t xallocs(void) {\n return 0;\n}\n#else\nstatic atomic_int_fast64_t nallocs = 0;\n\nsize_t xallocs(void) {\n if (usetrackallocs) {\n return atomic_load(&nallocs);\n } else {\n return 0;\n }\n}\n\nstatic void add_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_add_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n\nstatic void sub_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_sub_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n#endif\n\nstatic void check_ptr(void *ptr) {\n if (!ptr) {\n fprintf(stderr, \"# %s\\n\", strerror(ENOMEM));\n abort();\n }\n}\n\nvoid *xmalloc(size_t size) {\n void *ptr = malloc(size);\n check_ptr(ptr);\n add_alloc();\n return ptr;\n}\n\nvoid *xrealloc(void *ptr, size_t size) {\n if (!ptr) {\n return xmalloc(size);\n }\n ptr = realloc(ptr, size);\n check_ptr(ptr);\n return ptr;\n}\n\nvoid xfree(void *ptr) {\n if (!ptr) {\n return;\n }\n free(ptr);\n sub_alloc();\n}\n\nvoid xpurge(void) {\n#ifdef HAS_MALLOC_H\n // Releases unused heap memory to OS\n malloc_trim(0);\n#endif\n}\n"], ["/pogocache/src/buf.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit buf.c is a simple interface for creating byte buffers\n#include \n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"buf.h\"\n\nvoid buf_ensure(struct buf *buf, size_t len) {\n if (buf->len+len > buf->cap) {\n size_t oldcap = buf->cap;\n size_t newcap = buf->cap;\n if (oldcap == 0) {\n buf->data = 0;\n newcap = 16;\n } else {\n newcap *= 2;\n }\n while (buf->len+len > newcap) {\n newcap *= 2;\n }\n buf->data = xrealloc(buf->data, newcap);\n buf->cap = newcap;\n }\n}\n\nvoid buf_append(struct buf *buf, const void *data, size_t len){\n buf_ensure(buf, len);\n memcpy(buf->data+buf->len, data, len);\n buf->len += len;\n}\n\nvoid buf_append_byte(struct buf *buf, char byte) {\n if (buf->len < buf->cap) {\n buf->data[buf->len++] = byte;\n } else {\n buf_append(buf, &byte, 1);\n }\n}\n\nvoid buf_clear(struct buf *buf) {\n // No capacity means this buffer is owned somewhere else and we \n // must not free the data.\n if (buf->cap) {\n xfree(buf->data);\n }\n memset(buf, 0, sizeof(struct buf));\n}\n\nvoid buf_append_uvarint(struct buf *buf, uint64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_u64(buf->data+buf->len, x);\n buf->len += n;\n}\n\nvoid buf_append_varint(struct buf *buf, int64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_i64(buf->data+buf->len, x);\n buf->len += n;\n}\n"], ["/pogocache/src/stats.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit stats.c tracks various stats. Mostly for the memcache protocol.\n#include \n#include \"stats.h\"\n\nstatic atomic_uint_fast64_t g_stat_cmd_flush = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_touch = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_meta = 0;\nstatic atomic_uint_fast64_t g_stat_get_expired = 0;\nstatic atomic_uint_fast64_t g_stat_get_flushed = 0;\nstatic atomic_uint_fast64_t g_stat_delete_misses = 0;\nstatic atomic_uint_fast64_t g_stat_delete_hits = 0;\nstatic atomic_uint_fast64_t g_stat_incr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_incr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_decr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_decr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_misses = 0;\nstatic atomic_uint_fast64_t g_stat_cas_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_badval = 0;\nstatic atomic_uint_fast64_t g_stat_touch_hits = 0;\nstatic atomic_uint_fast64_t g_stat_touch_misses = 0;\nstatic atomic_uint_fast64_t g_stat_store_too_large = 0;\nstatic atomic_uint_fast64_t g_stat_store_no_memory = 0;\nstatic atomic_uint_fast64_t g_stat_auth_cmds = 0;\nstatic atomic_uint_fast64_t g_stat_auth_errors = 0;\n\nvoid stat_cmd_flush_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_flush, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_touch_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_touch, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_meta_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_meta, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_expired_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_expired, 1, __ATOMIC_RELAXED);\n}\n\nvoid 
stat_get_flushed_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_flushed, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_badval_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_badval, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_too_large_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_too_large, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_no_memory_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_no_memory, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_cmds_incr(struct conn *conn) {\n 
(void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_cmds, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_errors_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_errors, 1, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_flush(void) {\n return atomic_load_explicit(&g_stat_cmd_flush, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_touch(void) {\n return atomic_load_explicit(&g_stat_cmd_touch, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_meta(void) {\n return atomic_load_explicit(&g_stat_cmd_meta, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_expired(void) {\n return atomic_load_explicit(&g_stat_get_expired, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_flushed(void) {\n return atomic_load_explicit(&g_stat_get_flushed, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_misses(void) {\n return atomic_load_explicit(&g_stat_delete_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_hits(void) {\n return atomic_load_explicit(&g_stat_delete_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_misses(void) {\n return atomic_load_explicit(&g_stat_incr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_hits(void) {\n return atomic_load_explicit(&g_stat_incr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_misses(void) {\n return atomic_load_explicit(&g_stat_decr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_hits(void) {\n return atomic_load_explicit(&g_stat_decr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_misses(void) {\n return atomic_load_explicit(&g_stat_cas_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_hits(void) {\n return atomic_load_explicit(&g_stat_cas_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_badval(void) {\n return atomic_load_explicit(&g_stat_cas_badval, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_hits(void) {\n return atomic_load_explicit(&g_stat_touch_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_misses(void) {\n return atomic_load_explicit(&g_stat_touch_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_too_large(void) {\n 
return atomic_load_explicit(&g_stat_store_too_large, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_no_memory(void) {\n return atomic_load_explicit(&g_stat_store_no_memory, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_cmds(void) {\n return atomic_load_explicit(&g_stat_auth_cmds, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_errors(void) {\n return atomic_load_explicit(&g_stat_auth_errors, __ATOMIC_RELAXED);\n}\n\n\n"], ["/pogocache/src/uring.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n#include \n#include \"uring.h\"\n\nbool uring_available(void) {\n#ifdef NOURING\n return false;\n#else\n static atomic_int available = -1;\n if (atomic_load_explicit(&available, __ATOMIC_ACQUIRE )== -1) {\n struct io_uring ring;\n if (io_uring_queue_init(1, &ring, 0) == 0) {\n io_uring_queue_exit(&ring);\n atomic_store(&available, 1);\n } else {\n atomic_store(&available, 0);\n }\n }\n return atomic_load_explicit(&available, __ATOMIC_ACQUIRE) == 1;\n#endif\n}\n"]], "task_instance_info": {"created_time": "2025-08-20 18:04:05", "created_task_model": "DeepSeek-R1", "function_skeleton": "bool tls_accept(int fd, struct tls **tls_out) {}", "code_description": "This code segment functions to: Establish a secure connection through protocol negotiation and error handling while initializing cryptographic communication components for network interactions."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```c\n[TASK_BEGIN]\nbool tls_accept(int fd, struct tls **tls_out) {\n struct tls *t = xmalloc(sizeof(*t));\n t->ssl = SSL_new(ctx);\n if (!t->ssl) {\n ERR_print_errors_fp(stderr);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n if 
(SSL_set_fd(t->ssl, fd) != 1) {\n ERR_print_errors_fp(stderr);\n SSL_free(t->ssl);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n int ret = SSL_accept(t->ssl);\n if (ret <= 0) {\n ERR_print_errors_fp(stderr);\n SSL_free(t->ssl);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n *tls_out = t;\n return true;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 18-04-56"}, "editdistance_info": {"edit_distance": 32.1739, "calculate_time": "2025-08-20 18:04:56", "true_code_clean": "bool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}", "predict_code_clean": "bool tls_accept(int fd, struct tls **tls_out) {\n struct tls *t = xmalloc(sizeof(*t));\n t->ssl = SSL_new(ctx);\n if (!t->ssl) {\n ERR_print_errors_fp(stderr);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n if (SSL_set_fd(t->ssl, fd) != 1) {\n ERR_print_errors_fp(stderr);\n SSL_free(t->ssl);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n int ret = SSL_accept(t->ssl);\n if (ret <= 0) {\n ERR_print_errors_fp(stderr);\n SSL_free(t->ssl);\n xfree(t);\n *tls_out = NULL;\n return false;\n }\n *tls_out = t;\n return true;\n}"}} {"repo_name": "pogocache", "file_name": "/pogocache/src/memcache.c", "inference_info": {"prefix_code": "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit memcache.c provides the parser for the Memcache wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\nstatic __thread size_t mc_n = 0;\n\nsize_t parse_lastmc_n(void) {\n return mc_n;\n}\n\nbool mc_valid_key(struct args *args, int i) {\n const uint8_t *key = (uint8_t*)args->bufs[i].data;\n size_t len = args->bufs[i].len;\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] == 0x7F) {\n return false;\n }\n }\n return true;\n}\n\nenum mc_cmd { MC_UNKNOWN, \n // writers (optional reply)\n MC_SET, MC_ADD, MC_REPLACE, MC_APPEND, MC_PREPEND, MC_CAS, // storage\n MC_INCR, MC_DECR, // increment/decrement\n MC_FLUSH_ALL, MC_DELETE, // deletion\n MC_TOUCH, // touch\n MC_VERBOSITY, // logging\n // readers (always replys)\n MC_GET, MC_GETS, // retreival\n MC_GAT, MC_GATS, // get and touch\n MC_VERSION, MC_STATS, // information\n MC_QUIT, // client\n};\n\nstatic bool is_mc_store_cmd(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_CAS;\n}\n\nstatic bool is_mc_noreplyable(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_VERBOSITY;\n}\n\n", "suffix_code": "\n\nssize_t parse_memcache(const char *data, size_t len, struct args *args, \n bool *noreply)\n{\n ssize_t n = parse_memcache_telnet(data, len, args);\n if (n <= 0 || args->len == 0) {\n return n;\n }\n // args_print(args);\n mc_n = n;\n enum mc_cmd cmd;\n struct args args2 = { 0 };\n *noreply = false;\n // check for common get-2\n if (args->len == 2 && arg_const_eq(args, 0, \"get\")) {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n 
parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args->bufs[0].data = \"mget\";\n args->bufs[0].len = 4;\n return n;\n }\n // Check for common set-5 (allows for expiry)\n if (args->len == 5 && arg_const_eq(args, 0, \"set\")) {\n if (args->bufs[2].len == 1 && args->bufs[2].data[0] == '0') {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool expset = false;\n int64_t x;\n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n expset = true;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n // replace the \"flags\" with a value\n args->bufs[2].len = value_len;\n args->bufs[2].data = (void*)value;\n args->len = 3;\n if (expset) {\n // add the \"ex \" to last two arguments\n args->bufs[4] = args->bufs[3];\n args->bufs[3].data = \"ex\";\n args->bufs[3].len = 2;\n args->len = 5;\n }\n return n;\n } else {\n // flags was set, use plus branch\n cmd = MC_SET;\n goto set_plus;\n }\n }\n // Otherwise use lookup command table. This could be optimized into a\n // switch table or hash table. See cmds.c for hash table example.\n cmd =\n arg_const_eq(args, 0, \"set\") ? MC_SET : // XY\n arg_const_eq(args, 0, \"add\") ? MC_ADD : // XY\n arg_const_eq(args, 0, \"cas\") ? MC_CAS : // XY\n arg_const_eq(args, 0, \"replace\") ? MC_REPLACE : // XY\n arg_const_eq(args, 0, \"get\") ? MC_GET : // XY\n arg_const_eq(args, 0, \"delete\") ? MC_DELETE : // XY\n arg_const_eq(args, 0, \"append\") ? MC_APPEND : // XY\n arg_const_eq(args, 0, \"prepend\") ? 
MC_PREPEND : // XY\n arg_const_eq(args, 0, \"gets\") ? MC_GETS : // XY\n arg_const_eq(args, 0, \"incr\") ? MC_INCR : // XY\n arg_const_eq(args, 0, \"decr\") ? MC_DECR: // XY\n arg_const_eq(args, 0, \"touch\") ? MC_TOUCH : // X\n arg_const_eq(args, 0, \"gat\") ? MC_GAT : // X\n arg_const_eq(args, 0, \"gats\") ? MC_GATS : // X\n arg_const_eq(args, 0, \"flush_all\") ? MC_FLUSH_ALL : // X\n arg_const_eq(args, 0, \"stats\") ? MC_STATS : // X\n arg_const_eq(args, 0, \"version\") ? MC_VERSION : // X\n arg_const_eq(args, 0, \"quit\") ? MC_QUIT : // XY\n arg_const_eq(args, 0, \"verbosity\") ? MC_VERBOSITY : // X\n MC_UNKNOWN;\n if (cmd == MC_UNKNOWN) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (is_mc_noreplyable(cmd)) {\n if (arg_const_eq(args, args->len-1, \"noreply\")) {\n *noreply = true;\n buf_clear(&args->bufs[args->len-1]);\n args->len--;\n }\n }\n if (is_mc_store_cmd(cmd)) {\n // Store commands include 'set', 'add', 'replace', 'append', 'prepend',\n // and 'cas'.\n if ((cmd == MC_CAS && args->len != 6) && \n (cmd != MC_CAS && args->len != 5))\n {\n parse_seterror(\"ERROR\");\n return -1;\n }\n set_plus:\n // check all values before continuing\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n int64_t x;\n if (!argi64(args, 2, &x) || x < 0) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool hascas = false;\n char cas[24] = \"0\";\n if (cmd == MC_CAS) {\n hascas = true;\n uint64_t y;\n if (!argu64(args, 5, &y)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n assert(args->bufs[5].len < sizeof(cas));\n memcpy(cas, args->bufs[5].data, args->bufs[5].len);\n cas[args->bufs[5].len] = '\\0';\n buf_clear(&args->bufs[5]);\n args->len--;\n }\n\n // Storage 
commands must read a value that follows the first line.\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n\n // Reconstruct the command into a RESP format. \n bool is_append_prepend = false;\n switch (cmd) {\n case MC_APPEND:\n args_append(&args2, \"append\", 6, true);\n is_append_prepend = true;\n break;\n case MC_PREPEND:\n args_append(&args2, \"prepend\", 7, true);\n is_append_prepend = true;\n break;\n default:\n args_append(&args2, \"set\", 3, true);\n break;\n }\n // Move key arg to new args\n take_and_append_arg(1);\n // Add value arg\n args_append(&args2, value, value_len, true);\n if (!is_append_prepend) {\n if (!(args->bufs[2].len == 1 && args->bufs[2].data[0] == '0')) {\n args_append(&args2, \"flags\", 5, true);\n take_and_append_arg(2);\n }\n \n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n args_append(&args2, \"ex\", 2, true);\n take_and_append_arg(3);\n }\n if (cmd == MC_ADD) {\n args_append(&args2, \"nx\", 2, true);\n } else if (cmd == MC_REPLACE) {\n args_append(&args2, \"xx\", 2, true);\n }\n if (hascas) {\n args_append(&args2, \"cas\", 3, true);\n args_append(&args2, cas, strlen(cas), false);\n }\n }\n } else if (cmd == MC_GET) {\n // Convert 'get * into 'MGET *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mget\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_DELETE) {\n // Convert 'delete ' into 'DEL '\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (args->len > 2) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check key\n if 
(!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"del\", 3, true);\n take_and_append_arg(1);\n } else if (cmd == MC_GETS) {\n // Convert 'gets * into 'MGETS *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mgets\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GAT) {\n // Convert 'gat * into 'gat *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gat\", 3, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GATS) {\n // Convert 'gats * into 'gats *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gats\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_STATS) {\n args_append(&args2, \"stats\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_INCR) {\n // Convert 'incr into 'uincrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n 
parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"uincrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_DECR) {\n // Convert 'decr into 'udecrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"udecrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_TOUCH) {\n // Convert 'touch ' into 'expire '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"expire\", 6, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_FLUSH_ALL) {\n // Convert 'flush_all [delay]' into 'FLUSHALL [DELAY seconds]'\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"flushall\", 8, true);\n if (args->len == 2) {\n args_append(&args2, \"delay\", 5, true);\n take_and_append_arg(1);\n }\n } else if (cmd == MC_QUIT) {\n args_append(&args2, \"quit\", 4, true);\n *noreply = true;\n } else if (cmd == MC_VERSION) {\n args_append(&args2, \"version\", 7, true);\n *noreply = false;\n } else if (cmd == MC_VERBOSITY) {\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"verbosity\", 7, true);\n take_and_append_arg(1);\n } else {\n return -1;\n }\n args_free(args);\n *args = args2;\n return n;\n}\n", "middle_code": "static ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n 
wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "c", "sub_task_type": null}, "context_code": [["/pogocache/src/cmds.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit cmd.c handles all incoming client commands.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n#include \"pogocache.h\"\n#include \"stats.h\"\n\n// from main.c\nextern const uint64_t seed;\nextern const char *path;\nextern const int verb;\nextern const char *auth;\nextern const bool useauth;\nextern const char *persist;\nextern const int nthreads;\nextern const char *version;\nextern const char *githash;\nextern atomic_int_fast64_t flush_delay;\nextern atomic_bool sweep;\nextern atomic_bool lowmem;\nextern const int nshards;\nextern const int narenas;\nextern const int64_t procstart;\nextern const int maxconns;\n\nextern struct pogocache *cache;\n\nstruct set_entry_context {\n bool written;\n struct conn *conn;\n const char *cmdname;\n};\n\nstatic bool set_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)val, (void)vallen,\n (void)expires, (void)flags, (void)cas;\n struct set_entry_context *ctx = udata;\n if 
(conn_proto(ctx->conn) == PROTO_POSTGRES) {\n pg_write_row_desc(ctx->conn, (const char*[]){ \"value\" }, 1);\n pg_write_row_data(ctx->conn, (const char*[]){ val }, \n (size_t[]){ vallen }, 1);\n pg_write_completef(ctx->conn, \"%s 1\", ctx->cmdname);\n pg_write_ready(ctx->conn, 'I');\n } else {\n conn_write_bulk(ctx->conn, val, vallen);\n }\n ctx->written = true;\n return true;\n}\n\nstatic void execSET(struct conn *conn, const char *cmdname, \n int64_t now, const char *key,\n size_t keylen, const char *val, size_t vallen, int64_t expires, bool nx,\n bool xx, bool get, bool keepttl, uint32_t flags, uint64_t cas, bool withcas)\n{\n stat_cmd_set_incr(conn);\n struct set_entry_context ctx = { .conn = conn, .cmdname = cmdname };\n struct pogocache_store_opts opts = {\n .time = now,\n .expires = expires,\n .cas = cas,\n .flags = flags,\n .keepttl = keepttl,\n .casop = withcas,\n .nx = nx,\n .xx = xx,\n .lowmem = atomic_load_explicit(&lowmem, __ATOMIC_ACQUIRE),\n .entry = get?set_entry:0,\n .udata = get?&ctx:0,\n };\n int status = pogocache_store(cache, key, keylen, val, vallen, &opts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n return;\n }\n if (get) {\n if (!ctx.written) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n pg_write_completef(conn, \"%s 0\", cmdname);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_null(conn);\n }\n }\n return;\n }\n bool stored = status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED;\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (!stored) {\n if (status == POGOCACHE_FOUND) {\n conn_write_raw(conn, \"EXISTS\\r\\n\", 8);\n } else {\n conn_write_raw(conn, \"NOT_FOUND\\r\\n\", 12);\n }\n } else {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n }\n break;\n case PROTO_HTTP:\n if (!stored) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n 
conn_write_http(conn, 200, \"OK\", \"Stored\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"%s %d\", cmdname, stored?1:0);\n pg_write_ready(conn, 'I');\n break;\n default:\n if (!stored) {\n conn_write_null(conn);\n } else {\n conn_write_string(conn, \"OK\");\n }\n break;\n }\n}\n\nstatic int64_t expiry_seconds_time(struct conn *conn, int64_t now, \n int64_t expiry)\n{\n if (conn_proto(conn) == PROTO_MEMCACHE && expiry > HOUR*24*30) {\n // Consider Unix time value rather than an offset from current time.\n int64_t unix_ = sys_unixnow();\n if (expiry > unix_) {\n expiry = expiry-sys_unixnow();\n } else {\n expiry = 0;\n }\n }\n return int64_add_clamp(now, expiry);\n}\n\n// SET key value [NX | XX] [GET] [EX seconds | PX milliseconds |\n// EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL] \n// [FLAGS flags] [CAS cas] \nstatic void cmdSET(struct conn *conn, struct args *args) {\n#ifdef CMDSETOK\n // For testing the theoretical top speed of a single SET command.\n // No data is stored.\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\n#endif\n // RESP command\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n const char *val = args->bufs[2].data;\n size_t vallen = args->bufs[2].len;\n int64_t expires = 0;\n int exkind = 0;\n bool nx = false;\n bool xx = false;\n bool get = false;\n bool keepttl = false;\n bool hasex = false;\n uint32_t flags = 0;\n uint64_t cas = 0;\n bool withcas = false;\n for (size_t i = 3; i < args->len; i++) {\n if (argeq(args, i, \"ex\")) {\n exkind = 1;\n goto parse_ex;\n } else if (argeq(args, i, \"px\")) {\n exkind = 2;\n goto parse_ex;\n } else if (argeq(args, i, \"exat\")) {\n exkind = 3;\n goto parse_ex;\n } else if (argeq(args, i, \"pxat\")) {\n exkind = 4;\n 
parse_ex:\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, \n &expires);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n if (expires <= 0) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n // memcache allows for negative expiration\n expires = expiry_seconds_time(conn, now, 0);\n goto skip_exkind;\n } else {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n }\n switch (exkind) {\n case 1:\n expires = int64_mul_clamp(expires, SECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 2:\n expires = int64_mul_clamp(expires, MILLISECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 3:\n expires = int64_mul_clamp(expires, SECOND);\n break;\n case 4:\n expires = int64_mul_clamp(expires, MILLISECOND);\n break;\n }\n skip_exkind:\n hasex = true;\n } else if (argeq(args, i, \"nx\")) {\n nx = true;\n } else if (argeq(args, i, \"xx\")) {\n xx = true;\n } else if (argeq(args, i, \"get\")) {\n get = true;\n } else if (argeq(args, i, \"keepttl\")) {\n keepttl = true;\n } else if (argeq(args, i, \"flags\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n uint64_t x;\n if (!argu64(args, i, &x)) {\n goto err_syntax;\n }\n flags = x&UINT32_MAX;\n } else if (argeq(args, i, \"cas\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n if (!argu64(args, i, &cas)) {\n goto err_syntax;\n }\n withcas = true;\n } else {\n goto err_syntax;\n }\n }\n assert(expires >= 0);\n if (keepttl && hasex > 0){\n goto err_syntax;\n }\n if (xx && nx > 0){\n goto err_syntax;\n }\n execSET(conn, \"SET\", now, key, keylen, val, vallen, expires, nx, xx, get,\n keepttl, flags, cas, withcas);\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n}\n\nstatic void cmdSETEX(struct conn *conn, struct args *args) {\n if (args->len != 4) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = 
sys_now();\n int64_t ex = 0;\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool ok = parse_i64(args->bufs[2].data, args->bufs[2].len, &ex);\n if (!ok || ex <= 0) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n ex = int64_mul_clamp(ex, SECOND);\n ex = int64_add_clamp(sys_now(), ex);\n const char *val = args->bufs[3].data;\n size_t vallen = args->bufs[3].len;\n execSET(conn, \"SETEX\", now, key, keylen, val, vallen, ex, 0, 0, 0, 0, 0, 0,\n 0);\n}\n\nstruct get_entry_context {\n struct conn *conn;\n bool cas;\n bool mget;\n};\n\nstatic void get_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)key, (void)keylen, (void)cas;\n (void)shard, (void)time, (void)expires, (void)flags, (void)update;\n struct get_entry_context *ctx = udata;\n int x;\n uint8_t buf[24];\n size_t n;\n switch (conn_proto(ctx->conn)) {\n case PROTO_POSTGRES:;\n char casbuf[24];\n if (ctx->cas) {\n x = 1;\n n = snprintf(casbuf, sizeof(casbuf), \"%\" PRIu64, cas);\n } else {\n x = 0;\n casbuf[0] = '\\0';\n n = 0;\n }\n if (ctx->mget) {\n pg_write_row_data(ctx->conn, (const char*[]){ key, val, casbuf }, \n (size_t[]){ keylen, vallen, n }, 2+x);\n } else {\n pg_write_row_data(ctx->conn, (const char*[]){ val, casbuf }, \n (size_t[]){ vallen, n }, 1+x);\n }\n break;\n case PROTO_MEMCACHE:\n conn_write_raw(ctx->conn, \"VALUE \", 6);\n conn_write_raw(ctx->conn, key, keylen);\n n = u64toa(flags, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n n = u64toa(vallen, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n if (ctx->cas) {\n n = u64toa(cas, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n }\n conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n conn_write_raw(ctx->conn, val, vallen);\n 
conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n break;\n case PROTO_HTTP:\n conn_write_http(ctx->conn, 200, \"OK\", val, vallen);\n break;\n default:\n if (ctx->cas) {\n conn_write_array(ctx->conn, 2);\n conn_write_uint(ctx->conn, cas);\n }\n conn_write_bulk(ctx->conn, val, vallen);\n }\n}\n\n// GET key\nstatic void cmdGET(struct conn *conn, struct args *args) {\n stat_cmd_get_incr(conn);\n#ifdef CMDGETNIL\n conn_write_null(conn);\n return;\n#endif\n#ifdef CMDSETOK\n conn_write_string(conn, \"$1\\r\\nx\\r\\n\");\n return;\n#endif\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n struct get_entry_context ctx = { \n .conn = conn\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_HTTP) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\" , -1);\n } else if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 0\");\n } else {\n conn_write_null(conn);\n }\n } else {\n stat_get_hits_incr(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 1\");\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_ready(conn, 'I');\n }\n}\n\n// MGET key [key...]\nstatic void cmdMGET(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct get_entry_context ctx = { \n .conn = conn,\n .mget = true,\n .cas = argeq(args, 0, \"mgets\"),\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int count = 0;\n int proto = conn_proto(conn);\n if (proto == 
PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\", \"value\", \"cas\" }, \n 2+(ctx.cas?1:0));\n } else if (proto == PROTO_RESP) {\n conn_write_array(conn, args->len-1);\n }\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_get_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_null(conn);\n }\n } else {\n count++;\n stat_get_hits_incr(conn);\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"MGET %d\", count);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n }\n}\n\nstruct keys_ctx {\n int64_t now;\n struct buf buf;\n size_t count;\n char *pattern;\n size_t plen;\n};\n\nstatic void keys_ctx_free(struct keys_ctx *ctx) {\n xfree(ctx->pattern);\n buf_clear(&ctx->buf);\n xfree(ctx);\n}\n\n// pattern matcher\n// see https://github.com/tidwall/match.c\nstatic bool match(const char *pat, size_t plen, const char *str, size_t slen,\n int depth)\n{\n if (depth == 128) {\n return false;\n }\n while (plen > 0) {\n if (pat[0] == '\\\\') {\n if (plen == 1) return false;\n pat++; plen--; \n } else if (pat[0] == '*') {\n if (plen == 1) return true;\n if (pat[1] == '*') {\n pat++; plen--;\n continue;\n }\n if (match(pat+1, plen-1, str, slen, depth+1)) return true;\n if (slen == 0) return false;\n str++; slen--;\n continue;\n }\n if (slen == 0) return false;\n if (pat[0] != '?' 
&& str[0] != pat[0]) return false;\n pat++; plen--;\n str++; slen--;\n }\n return slen == 0 && plen == 0;\n}\n\nstatic int keys_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)value, (void)valuelen, (void)expires, \n (void)flags, (void)cas;\n struct keys_ctx *ctx = udata;\n if ((ctx->plen == 1 && *ctx->pattern == '*') || \n match(ctx->pattern, ctx->plen, key, keylen, 0))\n {\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n ctx->count++;\n }\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void bgkeys_work(void *udata) {\n struct keys_ctx *ctx = udata;\n struct pogocache_iter_opts opts = {\n .time = ctx->now,\n .entry = keys_entry,\n .udata = ctx,\n };\n pogocache_iter(cache, &opts);\n}\n\nstatic void bgkeys_done(struct conn *conn, void *udata) {\n struct keys_ctx *ctx = udata;\n int proto = conn_proto(conn);\n const char *p = ctx->buf.data;\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\" }, 1);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n pg_write_row_data(conn, (const char*[]){ key }, \n (size_t[]){ keylen }, 1);\n }\n pg_write_completef(conn, \"KEYS %zu\", ctx->count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_array(conn, ctx->count);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n conn_write_bulk(conn, key, keylen);\n }\n }\n keys_ctx_free(ctx);\n}\n\nstatic void cmdKEYS(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *pattern = args->bufs[1].data;\n size_t plen = args->bufs[1].len;\n struct keys_ctx *ctx = xmalloc(sizeof(struct 
keys_ctx));\n memset(ctx, 0, sizeof(struct keys_ctx));\n ctx->pattern = xmalloc(plen+1);\n memcpy(ctx->pattern, pattern, plen);\n ctx->pattern[plen] = '\\0';\n ctx->plen = plen;\n ctx->now = now;\n if (!conn_bgwork(conn, bgkeys_work, bgkeys_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n keys_ctx_free(ctx);\n }\n}\n\nstatic void cmdDEL(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct pogocache_delete_opts opts = {\n .time = now,\n };\n int64_t deleted = 0;\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_delete(cache, key, keylen, &opts);\n if (status == POGOCACHE_DELETED) {\n stat_delete_hits_incr(conn);\n deleted++;\n } else {\n stat_delete_misses_incr(conn);\n }\n }\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (deleted == 0) {\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n } else {\n conn_write_raw_cstr(conn, \"DELETED\\r\\n\");\n }\n break;\n case PROTO_HTTP:\n if (deleted == 0) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Deleted\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"DEL %\" PRIi64, deleted);\n pg_write_ready(conn, 'I');\n break;\n default:\n conn_write_int(conn, deleted);\n }\n}\n\nstatic void cmdDBSIZE(struct conn *conn, struct args *args) {\n if (args->len != 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n struct pogocache_count_opts opts = { .time = sys_now() };\n size_t count = pogocache_count(cache, &opts);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"count\", count, \"DBSIZE\");\n } else {\n conn_write_int(conn, (int64_t)count);\n }\n}\n\nstruct flushctx { \n pthread_t th;\n int64_t time;\n int start;\n int count;\n};\n\nstatic void 
*thflush(void *arg) {\n struct flushctx *ctx = arg;\n struct pogocache_clear_opts opts = { .time = sys_now(), .oneshard = true };\n for (int i = 0; i < ctx->count; i++) {\n opts.oneshardidx = i+ctx->start;\n pogocache_clear(cache, &opts);\n }\n return 0;\n}\n\nstatic void bgflushwork(void *udata) {\n (void)udata;\n atomic_store(&flush_delay, 0);\n int64_t now = sys_now();\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n struct flushctx *ctxs = xmalloc(nprocs*sizeof(struct flushctx));\n memset(ctxs, 0, nprocs*sizeof(struct flushctx));\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->time = now;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (pthread_create(&ctx->th, 0, thflush, ctx) == -1) {\n ctx->th = 0;\n }\n start += ctx->count;\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thflush(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n}\n\nstatic void bgflushdone(struct conn *conn, void *udata) {\n const char *cmdname = udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s SYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\n// FLUSHALL [SYNC|ASYNC] [DELAY ]\nstatic void cmdFLUSHALL(struct conn *conn, struct args *args) {\n const char *cmdname = \n args_eq(args, 0, \"flush\") ? \"FLUSH\" :\n args_eq(args, 0, \"flushdb\") ? 
\"FLUSHDB\" :\n \"FLUSHALL\";\n stat_cmd_flush_incr(conn);\n bool async = false;\n int64_t delay = 0;\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"async\")) {\n async = true;\n } else if (argeq(args, i, \"sync\")) {\n async = false;\n } else if (argeq(args, i, \"delay\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, &delay);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid exptime argument\");\n return;\n }\n if (delay > 0) {\n async = true;\n }\n } else {\n goto err_syntax;\n }\n }\n if (async) {\n if (delay < 0) {\n delay = 0;\n }\n delay = int64_mul_clamp(delay, SECOND);\n delay = int64_add_clamp(delay, sys_now());\n atomic_store(&flush_delay, delay);\n // ticker will check the delay and perform the flush\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s ASYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n // Flush database is slow. cmdname is static and thread safe\n conn_bgwork(conn, bgflushwork, bgflushdone, (void*)cmdname);\n return;\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct bgsaveloadctx {\n bool ok; // true = success, false = out of disk space\n bool fast; // use all the proccesing power, otherwise one thread.\n char *path; // path to file\n bool load; // otherwise save\n};\n\nstatic void bgsaveloadwork(void *udata) {\n struct bgsaveloadctx *ctx = udata;\n int64_t start = sys_now();\n int status;\n if (ctx->load) {\n status = load(ctx->path, ctx->fast, 0);\n } else {\n status = save(ctx->path, ctx->fast);\n }\n printf(\". 
%s finished %.3f secs\\n\", ctx->load?\"load\":\"save\", \n (sys_now()-start)/1e9);\n ctx->ok = status == 0;\n}\n\nstatic void bgsaveloaddone(struct conn *conn, void *udata) {\n struct bgsaveloadctx *ctx = udata;\n if (ctx->ok) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s OK\", ctx->load?\"LOAD\":\"SAVE\");\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (ctx->load) {\n conn_write_error(conn, \"load failed\");\n } else {\n conn_write_error(conn, \"save failed\");\n }\n }\n xfree(ctx->path);\n xfree(ctx);\n}\n\n// SAVE [TO ] [FAST]\n// LOAD [FROM ] [FAST]\nstatic void cmdSAVELOAD(struct conn *conn, struct args *args) {\n bool load = argeq(args, 0, \"load\");\n bool fast = false;\n const char *path = persist;\n size_t plen = strlen(persist);\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"fast\")) {\n fast = true;\n } else if ((load && argeq(args, i, \"from\")) || \n (!load && argeq(args, i, \"to\")))\n {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n path = args->bufs[i].data;\n plen = args->bufs[i].len;\n } else {\n goto err_syntax;\n }\n }\n if (plen == 0) {\n conn_write_error(conn, \"ERR path not provided\");\n return;\n }\n struct bgsaveloadctx *ctx = xmalloc(sizeof(struct bgsaveloadctx));\n memset(ctx, 0, sizeof(struct bgsaveloadctx));\n ctx->fast = fast;\n ctx->path = xmalloc(plen+1);\n ctx->load = load;\n memcpy(ctx->path, path, plen);\n ctx->path[plen] = '\\0';\n if (!conn_bgwork(conn, bgsaveloadwork, bgsaveloaddone, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx->path);\n xfree(ctx);\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct ttlctx {\n struct conn *conn;\n bool pttl;\n};\n\nstatic void ttl_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t 
vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)key, (void)keylen, (void)val, (void)vallen, (void)flags,\n (void)cas, (void)update;\n struct ttlctx *ctx = udata;\n int64_t ttl;\n if (expires > 0) {\n ttl = expires-time;\n if (ctx->pttl) {\n ttl /= MILLISECOND;\n } else {\n ttl /= SECOND;\n }\n } else {\n ttl = -1;\n }\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n char ttlstr[24];\n size_t n = i64toa(ttl, (uint8_t*)ttlstr);\n pg_write_row_data(ctx->conn, (const char*[]){ ttlstr }, \n (size_t[]){ n }, 1);\n } else {\n conn_write_int(ctx->conn, ttl);\n }\n}\n\nstatic void cmdTTL(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool pttl = argeq(args, 0, \"pttl\");\n struct ttlctx ctx = { .conn = conn, .pttl = pttl };\n struct pogocache_load_opts opts = {\n .time = sys_now(),\n .entry = ttl_entry,\n .notouch = true,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ pttl?\"pttl\":\"ttl\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_int(conn, -2);\n }\n } else {\n stat_get_hits_incr(conn);\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %d\", pttl?\"PTTL\":\"TTL\",\n status!=POGOCACHE_NOTFOUND);\n pg_write_ready(conn, 'I');\n }\n}\n\nstatic void expire_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)expires, (void)cas;\n struct pogocache_update *ctx = udata;\n ctx->flags = flags;\n ctx->value = 
value;\n ctx->valuelen = valuelen;\n *update = ctx;\n}\n\n// EXPIRE key seconds\n// returns 1 if success or 0 on failure. \nstatic void cmdEXPIRE(struct conn *conn, struct args *args) {\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n int64_t expires;\n if (!argi64(args, 2, &expires)) {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n return;\n }\n expires = int64_mul_clamp(expires, POGOCACHE_SECOND);\n expires = int64_add_clamp(now, expires);\n struct pogocache_update ctx = { .expires = expires };\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = expire_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(cache, key, keylen, &lopts);\n int ret = status == POGOCACHE_FOUND;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"EXPIRE %d\", ret);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, ret);\n }\n}\n\n// EXISTS key [key...]\n// Checks if one or more keys exist in the cache.\n// Return the number of keys that exist\nstatic void cmdEXISTS(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t count = 0;\n struct pogocache_load_opts opts = {\n .time = now,\n .notouch = true,\n };\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n count++;\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"exists\", count, \"EXISTS\");\n } else {\n conn_write_int(conn, count);\n }\n}\n\nstatic void sweep_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n size_t swept;\n size_t kept;\n struct pogocache_sweep_opts opts = {\n .time = start,\n };\n printf(\". 
sweep started\\n\");\n pogocache_sweep(cache, &swept, &kept, &opts);\n double elapsed = (sys_now()-start)/1e9;\n printf(\". sweep finished in %.2fs, (swept=%zu, kept=%zu) \\n\", elapsed, \n swept, kept);\n}\n\nstatic void sweep_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thsweep(void *arg) {\n (void)arg;\n sweep_work(0);\n return 0;\n}\n\n// SWEEP [ASYNC]\nstatic void cmdSWEEP(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thsweep, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, sweep_work, sweep_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstatic void purge_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n printf(\". purge started\\n\");\n xpurge();\n double elapsed = (sys_now()-start)/1e9;\n printf(\". 
purge finished in %.2fs\\n\", elapsed);\n}\n\nstatic void purge_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thpurge(void *arg) {\n (void)arg;\n purge_work(0);\n return 0;\n}\n\n// PURGE [ASYNC]\nstatic void cmdPURGE(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thpurge, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, purge_work, purge_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstruct populate_ctx {\n pthread_t th;\n size_t start;\n size_t count;\n char *prefix;\n size_t prefixlen;\n char *val;\n size_t vallen;\n bool randex;\n int randmin;\n int randmax;\n};\n\nstatic void *populate_entry(void *arg) {\n int64_t now = sys_now();\n struct populate_ctx *ctx = arg;\n char *key = xmalloc(ctx->prefixlen+32);\n memcpy(key, ctx->prefix, ctx->prefixlen);\n key[ctx->prefixlen++] = ':';\n for (size_t i = ctx->start; i < ctx->start+ctx->count; i++) {\n size_t n = i64toa(i, (uint8_t*)(key+ctx->prefixlen));\n size_t keylen = ctx->prefixlen+n;\n struct pogocache_store_opts opts = { \n .time = now,\n };\n if (ctx->randex) {\n int ex = (rand()%(ctx->randmax-ctx->randmin))+ctx->randmin;\n opts.ttl = ex*POGOCACHE_SECOND;\n }\n pogocache_store(cache, key, keylen, ctx->val, ctx->vallen, 
&opts);\n }\n xfree(key);\n return 0;\n}\n\n// DEBUG POPULATE [rand-ex-range]\n// DEBUG POPULATE \n// DEBUG POPULATE 1000000 test 16\n// DEBUG POPULATE 1000000 test 16 5-10\nstatic void cmdDEBUG_populate(struct conn *conn, struct args *args) {\n if (args->len != 4 && args->len != 5) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t count;\n if (!argi64(args, 1, &count) || count < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n size_t prefixlen = args->bufs[2].len;\n char *prefix = args->bufs[2].data;\n int64_t vallen;\n if (!argi64(args, 3, &vallen) || vallen < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n bool randex = false;\n int randmin = 0;\n int randmax = 0;\n if (args->len == 5) {\n size_t exlen = args->bufs[4].len;\n char *aex = args->bufs[4].data;\n char *ex = xmalloc(exlen+1);\n memcpy(ex, aex, exlen);\n ex[exlen] = '\\0';\n if (strchr(ex, '-')) {\n randmin = atoi(ex);\n randmax = atoi(strchr(ex, '-')+1);\n randex = true;\n }\n xfree(ex);\n }\n\n char *val = xmalloc(vallen);\n memset(val, 0, vallen);\n int nprocs = sys_nprocs();\n if (nprocs < 0) {\n nprocs = 1;\n }\n struct populate_ctx *ctxs = xmalloc(nprocs*sizeof(struct populate_ctx));\n memset(ctxs, 0, nprocs*sizeof(struct populate_ctx));\n size_t group = count/nprocs;\n size_t start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n ctx->start = start;\n if (i == nprocs-1) {\n ctx->count = count-start;\n } else {\n ctx->count = group;\n }\n ctx->prefix = prefix;\n ctx->prefixlen = prefixlen;\n ctx->val = val;\n ctx->vallen = vallen;\n ctx->randex = randex;\n ctx->randmin = randmin;\n ctx->randmax = randmax;\n if (pthread_create(&ctx->th, 0, populate_entry, ctx) == -1) {\n ctx->th = 0;\n }\n start += group;\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n populate_entry(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = 
&ctxs[i];\n if (ctx->th) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n xfree(val);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"DEBUG POPULATE %\" PRIi64, count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstruct dbg_detach_ctx {\n int64_t now;\n int64_t then;\n};\n\nstatic void detach_work(void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n ctx->then = sys_now();\n // printf(\". ----- DELAY START\\n\");\n // sleep(1);\n // printf(\". ----- DELAY END\\n\");\n}\n\nstatic void detach_done(struct conn *conn, void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n char buf[128];\n snprintf(buf, sizeof(buf), \"%\" PRId64 \":%\" PRId64, ctx->now, ctx->then);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_str_ready(conn, \"detach\", buf, \"DEBUG DETACH\");\n } else {\n conn_write_bulk_cstr(conn, buf);\n }\n xfree(ctx);\n}\n\n// DEBUG detach\nstatic void cmdDEBUG_detach(struct conn *conn, struct args *args) {\n (void)args;\n struct dbg_detach_ctx *ctx = xmalloc(sizeof(struct dbg_detach_ctx));\n memset(ctx, 0,sizeof(struct dbg_detach_ctx));\n ctx->now = sys_now();\n if (!conn_bgwork(conn, detach_work, detach_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx);\n }\n}\n\n// DEBUG subcommand (args...)\nstatic void cmdDEBUG(struct conn *conn, struct args *args) {\n if (args->len <= 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n // args = args[1:]\n args = &(struct args){ .bufs = args->bufs+1, .len = args->len-1 };\n if (argeq(args, 0, \"populate\")) {\n cmdDEBUG_populate(conn, args);\n } else if (argeq(args, 0, \"detach\")) {\n cmdDEBUG_detach(conn, args);\n } else {\n conn_write_error(conn, \"ERR unknown subcommand\");\n }\n}\n\nstatic void cmdECHO(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n 
pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"ECHO\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n}\n\nstatic void cmdPING(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n if (args->len == 1) {\n pg_write_simple_row_str_ready(conn, \"message\", \"PONG\", \"PING\"); \n } else {\n pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"PING\");\n }\n } else {\n if (args->len == 1) {\n conn_write_string(conn, \"PONG\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n }\n}\n\nstatic void cmdQUIT(struct conn *conn, struct args *args) {\n (void)args;\n if (conn_proto(conn) == PROTO_RESP) {\n conn_write_string(conn, \"OK\");\n }\n conn_close(conn);\n}\n\n// TOUCH key [key...]\nstatic void cmdTOUCH(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t touched = 0;\n struct pogocache_load_opts opts = { \n .time = now,\n };\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_touch_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n stat_touch_hits_incr(conn);\n touched++;\n } else {\n stat_touch_misses_incr(conn);\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"TOUCH %\" PRIi64, touched);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, touched);\n }\n}\n\nstruct get64ctx {\n bool ok;\n bool isunsigned;\n union {\n int64_t ival;\n uint64_t uval;\n };\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n};\n\nunion delta { \n uint64_t u;\n int64_t i;\n};\n\nstatic void get64(int shard, int64_t time, const void *key,\n 
size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update;\n struct get64ctx *ctx = udata;\n ctx->flags = flags;\n ctx->expires = expires;\n ctx->cas = cas;\n if (ctx->isunsigned) {\n ctx->ok = parse_u64(val, vallen, &ctx->uval);\n } else {\n ctx->ok = parse_i64(val, vallen, &ctx->ival);\n }\n}\n\nstatic void execINCRDECR(struct conn *conn, const char *key, size_t keylen, \n union delta delta, bool decr, bool isunsigned, const char *cmdname)\n{\n bool hit = false;\n bool miss = false;\n int64_t now = sys_now();\n struct get64ctx ctx = { .isunsigned = isunsigned };\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts gopts = {\n .time = now,\n .entry = get64,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &gopts);\n bool found = status == POGOCACHE_FOUND;\n if (found && !ctx.ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR cannot increment or \"\n \"decrement non-numeric value\\r\\n\");\n goto done;\n }\n goto fail_value_non_numeric;\n } else if (!found && conn_proto(conn) == PROTO_MEMCACHE) {\n miss = true;\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n goto done;\n }\n // add or subtract\n bool overflow;\n if (isunsigned) {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.uval, delta.u, &ctx.uval);\n } else {\n overflow = __builtin_add_overflow(ctx.uval, delta.u, &ctx.uval);\n }\n } else {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.ival, delta.i, &ctx.ival);\n } else {\n overflow = __builtin_add_overflow(ctx.ival, delta.i, &ctx.ival);\n }\n }\n if (overflow && conn_proto(conn) != PROTO_MEMCACHE) {\n goto fail_overflow;\n }\n // re-set the value\n char val[24];\n size_t vallen;\n if (isunsigned) {\n vallen = u64toa(ctx.uval, (uint8_t*)val);\n } else {\n vallen = i64toa(ctx.ival, (uint8_t*)val);\n 
    }
    // Store back with the original entry metadata preserved.
    struct pogocache_store_opts sopts = {
        .time = now,
        .expires = ctx.expires, 
        .flags = ctx.flags, 
        .cas = ctx.cas,
        .udata = &ctx,
    };
    status = pogocache_store(batch, key, keylen, val, vallen, &sopts);
    if (status == POGOCACHE_NOMEM) {
        stat_store_no_memory_incr(conn);
        conn_write_error(conn, ERR_OUT_OF_MEMORY);
        goto done;
    }
    assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);
    if (conn_proto(conn) == PROTO_POSTGRES) {
        char val[24];
        if (isunsigned) {
            snprintf(val, sizeof(val), "%" PRIu64, ctx.uval);
        } else {
            snprintf(val, sizeof(val), "%" PRIi64, ctx.ival);
        }
        pg_write_simple_row_str_readyf(conn, "value", val, "%s", cmdname);
    } else {
        if (isunsigned) {
            conn_write_uint(conn, ctx.uval);
        } else {
            conn_write_int(conn, ctx.ival);
        }
    }
    hit = true;
    goto done;
fail_value_non_numeric:
    conn_write_error(conn, ERR_INVALID_INTEGER);
    goto done;
fail_overflow:
    conn_write_error(conn, "ERR increment or decrement would overflow");
    goto done;
done:
    // Update hit/miss counters for whichever direction was requested.
    if (hit) {
        if (decr) {
            stat_decr_hits_incr(conn);
        } else {
            stat_incr_hits_incr(conn);
        }
    } else if (miss) {
        if (decr) {
            stat_decr_misses_incr(conn);
        } else {
            stat_incr_misses_incr(conn);
        }
    }
    pogocache_end(batch);
}

// Argument handling shared by INCRBY/DECRBY. A leading 'u' on the command
// name (UINCRBY/UDECRBY) selects unsigned 64-bit arithmetic.
static void cmdINCRDECRBY(struct conn *conn, struct args *args, 
    bool decr, const char *cmdname)
{
    if (args->len != 3) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta;
    bool ok;
    if (isunsigned) {
        ok = argu64(args, 2, &delta.u);
    } else {
        ok = argi64(args, 2, &delta.i);
    }
    if (!ok) {
        if (conn_proto(conn) == PROTO_MEMCACHE) {
            conn_write_raw_cstr(conn, "CLIENT_ERROR invalid numeric delta "
                "argument\r\n");
        } else {
            conn_write_error(conn, ERR_INVALID_INTEGER);
        }
        return;
    }
    execINCRDECR(conn, key, keylen, delta, decr, 
        isunsigned, cmdname);
}

// DECRBY key num
static void cmdDECRBY(struct conn *conn, struct args *args) {
    cmdINCRDECRBY(conn, args, true, "DECRBY");
}

// INCRBY key num
static void cmdINCRBY(struct conn *conn, struct args *args) {
    cmdINCRDECRBY(conn, args, false, "INCRBY");
}

// DECR key
// Decrement by one. 'udecr' (leading 'u') selects unsigned arithmetic.
static void cmdDECR(struct conn *conn, struct args *args) {
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta = { .i = 1 };
    execINCRDECR(conn, key, keylen, delta, true, isunsigned, "DECR");
}

// INCR key
// Increment by one. 'uincr' (leading 'u') selects unsigned arithmetic.
static void cmdINCR(struct conn *conn, struct args *args) {
    if (args->len != 2) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    union delta delta = { .i = 1 };
    execINCRDECR(conn, key, keylen, delta, false, isunsigned, "INCR");
}

// Context for append_entry: carries the caller's value in and the
// concatenated result (plus preserved entry metadata) out.
struct appendctx {
    bool prepend;      // prepend instead of append
    uint32_t flags;    // entry flags preserved across the re-store
    int64_t expires;   // entry expiration preserved across the re-store
    const char *val;   // value to splice in
    size_t vallen;
    char *outval;      // xmalloc'd concatenation; freed by the caller
    size_t outvallen;
};

// Load callback that builds the appended/prepended value into ctx->outval.
static void append_entry(int shard, int64_t time, const void *key,
    size_t keylen, const void *val, size_t vallen, int64_t expires, 
    uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)
{
    (void)shard, (void)time, (void)key, (void)keylen, (void)update, (void)cas;
    struct appendctx *ctx = udata;
    ctx->expires = expires;
    ctx->flags = flags;
    ctx->outvallen = vallen+ctx->vallen;
    ctx->outval = xmalloc(ctx->outvallen);
    if (ctx->prepend) {
        memcpy(ctx->outval, ctx->val, ctx->vallen);
        memcpy(ctx->outval+ctx->vallen, val, vallen);
    } else {
        memcpy(ctx->outval, val, vallen);
        memcpy(ctx->outval+vallen, ctx->val, ctx->vallen);
    }
}

// APPEND key value
// Also services PREPEND; direction is detected from argv[0].
static void cmdAPPEND(struct conn *conn, struct args *args) {
    int64_t now = sys_now();
    if (args->len != 3) {
        conn_write_error(conn, ERR_WRONG_NUM_ARGS);
        return;
    }
    int proto = conn_proto(conn);
    bool prepend = argeq(args, 0, "prepend");
    size_t keylen;
    const char *key = args_at(args, 1, &keylen);
    size_t vallen;
    const char *val = args_at(args, 2, &vallen);
    struct appendctx ctx = { 
        .prepend = prepend,
        .val = val,
        .vallen = vallen,
    };
    size_t len;
    // Use a batch transaction for key isolation.
    struct pogocache *batch = pogocache_begin(cache);
    struct pogocache_load_opts lopts = { 
        .time = now,
        .entry = append_entry,
        .udata = &ctx,
    };
    int status = pogocache_load(batch, key, keylen, &lopts);
    if (status == POGOCACHE_NOTFOUND) {
        // Missing key: memcache refuses; other protocols create the entry.
        if (proto == PROTO_MEMCACHE) {
            conn_write_raw_cstr(conn, "NOT_STORED\r\n");
            goto done;
        }
        len = vallen;
        struct pogocache_store_opts sopts = {
            .time = now,
        };
        status = pogocache_store(batch, key, keylen, val, vallen, &sopts);
    } else {
        if (ctx.outvallen > MAXARGSZ) {
            // do not let values become larger than 500MB
            xfree(ctx.outval);
            conn_write_error(conn, "ERR value too large");
            goto done;
        }
        len = ctx.outvallen;
        struct pogocache_store_opts sopts = {
            .time = now,
            .expires = ctx.expires,
            .flags = ctx.flags,
        };
        status = pogocache_store(batch, key, keylen, ctx.outval, ctx.outvallen, 
            &sopts);
        xfree(ctx.outval);
    }
    if (status == POGOCACHE_NOMEM) {
        conn_write_error(conn, ERR_OUT_OF_MEMORY);
        goto done;
    }
    assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);
    if (proto == PROTO_POSTGRES) {
        pg_write_completef(conn, "%s %zu", prepend?"PREPEND":"APPEND", len);
        pg_write_ready(conn, 'I');
    } else if (proto == PROTO_MEMCACHE) {
        conn_write_raw_cstr(conn, "STORED\r\n");
    } else {
        conn_write_int(conn, len);
    }
done:
    pogocache_end(batch);
}

// PREPEND key value — thin alias; cmdAPPEND inspects argv[0].
static void cmdPREPEND(struct conn *conn, struct args *args) {
    cmdAPPEND(conn, args);
}

static void cmdAUTH(struct conn *conn, struct args 
*args) {\n stat_auth_cmds_incr(0);\n if (!argeq(args, 0, \"auth\")) {\n stat_auth_errors_incr(0);\n goto noauth;\n }\n if (args->len == 3) {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n if (args->len > 3) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n if (args->len == 1) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (args->bufs[1].len != strlen(auth) || \n memcmp(auth, args->bufs[1].data, args->bufs[1].len) != 0)\n {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n conn_setauth(conn, true);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_complete(conn, \"AUTH OK\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\nnoauth:\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \n \"CLIENT_ERROR Authentication required\\r\\n\");\n } else {\n conn_write_error(conn, \"NOAUTH Authentication required.\");\n }\n return;\nwrongpass:\n conn_write_error(conn, \n \"WRONGPASS invalid username-password pair or user is disabled.\");\n}\n\nstruct stats {\n // use the args type as a list.\n struct args args;\n};\n\nstatic void stats_begin(struct stats *stats) {\n memset(stats, 0, sizeof(struct stats));\n}\n\nstatic void stats_end(struct stats *stats, struct conn *conn) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"stat\", \"value\" }, 2);\n for (size_t i = 0; i < stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n pg_write_row_data(conn, (const char*[]){ stat, val }, \n (size_t[]){ strlen(stat), strlen(val) }, 2);\n }\n pg_write_completef(conn, \"STATS %zu\", stats->args.len);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n char line[512];\n for (size_t i = 0; i < 
stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n size_t n = snprintf(line, sizeof(line), \"STAT %s\\r\\n\", stat);\n conn_write_raw(conn, line, n);\n }\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n } else {\n conn_write_array(conn, stats->args.len);\n for (size_t i = 0; i < stats->args.len; i++) {\n conn_write_array(conn, 2);\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n conn_write_bulk_cstr(conn, key);\n conn_write_bulk_cstr(conn, val);\n }\n }\n args_free(&stats->args);\n}\n\nstatic void stats_printf(struct stats *stats, const char *format, ...) {\n // initializing list pointer\n char line[512];\n va_list ap;\n va_start(ap, format);\n size_t len = vsnprintf(line, sizeof(line)-1, format, ap);\n va_end(ap);\n args_append(&stats->args, line, len+1, false); // include null-terminator\n}\n\nstatic void stats(struct conn *conn) {\n struct stats stats;\n stats_begin(&stats);\n stats_printf(&stats, \"pid %d\", getpid());\n stats_printf(&stats, \"uptime %.0f\", (sys_now()-procstart)/1e9);\n stats_printf(&stats, \"time %.0f\", sys_unixnow()/1e9);\n stats_printf(&stats, \"product %s\", \"pogocache\");\n stats_printf(&stats, \"version %s\", version);\n stats_printf(&stats, \"githash %s\", githash);\n stats_printf(&stats, \"pointer_size %zu\", sizeof(uintptr_t)*8);\n struct rusage usage;\n if (getrusage(RUSAGE_SELF, &usage) == 0) {\n stats_printf(&stats, \"rusage_user %ld.%06ld\",\n usage.ru_utime.tv_sec, usage.ru_utime.tv_usec);\n stats_printf(&stats, \"rusage_system %ld.%06ld\",\n usage.ru_stime.tv_sec, usage.ru_stime.tv_usec);\n }\n stats_printf(&stats, \"max_connections %zu\", maxconns);\n stats_printf(&stats, \"curr_connections %zu\", net_nconns());\n stats_printf(&stats, \"total_connections %zu\", net_tconns());\n stats_printf(&stats, \"rejected_connections %zu\", net_rconns());\n stats_printf(&stats, \"cmd_get %\" PRIu64, stat_cmd_get());\n 
    stats_printf(&stats, "cmd_set %" PRIu64, stat_cmd_set());
    stats_printf(&stats, "cmd_flush %" PRIu64, stat_cmd_flush());
    stats_printf(&stats, "cmd_touch %" PRIu64, stat_cmd_touch());
    stats_printf(&stats, "get_hits %" PRIu64, stat_get_hits());
    stats_printf(&stats, "get_misses %" PRIu64, stat_get_misses());
    stats_printf(&stats, "delete_misses %" PRIu64, stat_delete_misses());
    stats_printf(&stats, "delete_hits %" PRIu64, stat_delete_hits());
    stats_printf(&stats, "incr_misses %" PRIu64, stat_incr_misses());
    stats_printf(&stats, "incr_hits %" PRIu64, stat_incr_hits());
    stats_printf(&stats, "decr_misses %" PRIu64, stat_decr_misses());
    stats_printf(&stats, "decr_hits %" PRIu64, stat_decr_hits());
    stats_printf(&stats, "touch_hits %" PRIu64, stat_touch_hits());
    stats_printf(&stats, "touch_misses %" PRIu64, stat_touch_misses());
    stats_printf(&stats, "store_too_large %" PRIu64, stat_store_too_large());
    stats_printf(&stats, "store_no_memory %" PRIu64, stat_store_no_memory());
    stats_printf(&stats, "auth_cmds %" PRIu64, stat_auth_cmds());
    stats_printf(&stats, "auth_errors %" PRIu64, stat_auth_errors());
    stats_printf(&stats, "threads %d", nthreads);
    struct sys_meminfo meminfo;
    sys_getmeminfo(&meminfo);
    stats_printf(&stats, "rss %zu", meminfo.rss);
    // 'bytes' counts entry data only, not cache bookkeeping.
    struct pogocache_size_opts sopts = { .entriesonly=true };
    stats_printf(&stats, "bytes %zu", pogocache_size(cache, &sopts));
    stats_printf(&stats, "curr_items %zu", pogocache_count(cache, 0));
    stats_printf(&stats, "total_items %" PRIu64, pogocache_total(cache, 0));
    stats_end(&stats, conn);
}

// STATS
// Memcache-style server statistics; accepts no extra arguments.
static void cmdSTATS(struct conn *conn, struct args *args) {
    if (args->len == 1) {
        return stats(conn);
    }
    conn_write_error(conn, ERR_SYNTAX_ERROR);
    return;
}

// Commands hash table. 
Lazy loaded per thread.\n// Simple open addressing using case-insensitive fnv1a hashes.\nstatic int nbuckets;\nstatic struct cmd *buckets;\n\nstruct cmd {\n const char *name;\n void (*func)(struct conn *conn, struct args *args);\n};\n\nstatic struct cmd cmds[] = {\n { \"set\", cmdSET }, // pg\n { \"get\", cmdGET }, // pg\n { \"del\", cmdDEL }, // pg\n { \"mget\", cmdMGET }, // pg\n { \"mgets\", cmdMGET }, // pg cas detected\n { \"ttl\", cmdTTL }, // pg\n { \"pttl\", cmdTTL }, // pg\n { \"expire\", cmdEXPIRE }, // pg\n { \"setex\", cmdSETEX }, // pg\n { \"dbsize\", cmdDBSIZE }, // pg\n { \"quit\", cmdQUIT }, // pg\n { \"echo\", cmdECHO }, // pg\n { \"exists\", cmdEXISTS }, // pg\n { \"flushdb\", cmdFLUSHALL }, // pg\n { \"flushall\", cmdFLUSHALL }, // pg\n { \"flush\", cmdFLUSHALL }, // pg\n { \"purge\", cmdPURGE }, // pg\n { \"sweep\", cmdSWEEP }, // pg\n { \"keys\", cmdKEYS }, // pg\n { \"ping\", cmdPING }, // pg\n { \"touch\", cmdTOUCH }, // pg\n { \"debug\", cmdDEBUG }, // pg\n { \"incrby\", cmdINCRBY }, // pg\n { \"decrby\", cmdDECRBY }, // pg\n { \"incr\", cmdINCR }, // pg\n { \"decr\", cmdDECR }, // pg\n { \"uincrby\", cmdINCRBY }, // pg unsigned detected in signed operation\n { \"udecrby\", cmdDECRBY }, // pg unsigned detected in signed operation\n { \"uincr\", cmdINCR }, // pg unsigned detected in signed operation\n { \"udecr\", cmdDECR }, // pg unsigned detected in signed operation\n { \"append\", cmdAPPEND }, // pg\n { \"prepend\", cmdPREPEND }, // pg\n { \"auth\", cmdAUTH }, // pg\n { \"save\", cmdSAVELOAD }, // pg\n { \"load\", cmdSAVELOAD }, // pg\n { \"stats\", cmdSTATS }, // pg memcache style stats\n};\n\nstatic void build_commands_table(void) {\n static __thread bool buckets_ready = false;\n static pthread_mutex_t cmd_build_lock = PTHREAD_MUTEX_INITIALIZER;\n static bool built = false;\n if (!buckets_ready) {\n pthread_mutex_lock(&cmd_build_lock);\n if (!built) {\n int ncmds = sizeof(cmds)/sizeof(struct cmd);\n int n = ncmds*8;\n nbuckets = 2;\n 
while (nbuckets < n) {\n nbuckets *= 2;\n }\n buckets = xmalloc(nbuckets*sizeof(struct cmd));\n memset(buckets, 0, nbuckets*sizeof(struct cmd));\n uint64_t hash;\n for (int i = 0; i < ncmds; i++) {\n hash = fnv1a_case(cmds[i].name, strlen(cmds[i].name));\n for (int j = 0; j < nbuckets; j++) {\n int k = (j+hash)&(nbuckets-1);\n if (!buckets[k].name) {\n buckets[k] = cmds[i];\n break;\n }\n }\n }\n built = true;\n }\n pthread_mutex_unlock(&cmd_build_lock);\n buckets_ready = true;\n }\n}\n\nstatic struct cmd *get_cmd(const char *name, size_t namelen) {\n build_commands_table();\n uint32_t hash = fnv1a_case(name, namelen);\n int j = hash&(nbuckets-1);\n while (1) {\n if (!buckets[j].name) {\n return 0;\n }\n if (argeq_bytes(name, namelen, buckets[j].name)) {\n return &buckets[j];\n }\n j++;\n }\n}\n\nvoid evcommand(struct conn *conn, struct args *args) {\n if (useauth && !conn_auth(conn)) {\n if (conn_proto(conn) == PROTO_HTTP) {\n // Let HTTP traffic through.\n // The request has already been authorized in http.c\n } else {\n cmdAUTH(conn, args);\n return;\n }\n }\n if (verb > 1) {\n if (!argeq(args, 0, \"auth\")) {\n args_print(args);\n }\n }\n struct cmd *cmd = get_cmd(args->bufs[0].data, args->bufs[0].len);\n if (cmd) {\n cmd->func(conn, args);\n } else {\n if (verb > 0) {\n printf(\"# Unknown command '%.*s'\\n\", (int)args->bufs[0].len,\n args->bufs[0].data);\n }\n char errmsg[128];\n snprintf(errmsg, sizeof(errmsg), \"ERR unknown command '%.*s'\", \n (int)args->bufs[0].len, args->bufs[0].data);\n conn_write_error(conn, errmsg);\n }\n}\n"], ["/pogocache/src/postgres.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit postgres.c provides the parser for the Postgres wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n\n// #define PGDEBUG\n\n#define TEXTOID 25\n#define BYTEAOID 17\n\nextern const char *version;\nextern const char *auth;\n\n#ifdef PGDEBUG\n#define dprintf printf\n#else\n#define dprintf(...)\n#endif\n\nstatic void print_packet(const char *data, size_t len) {\n dprintf(\". PACKET=%03zu [ \", len);\n for (size_t i = 0; i < len; i++) {\n printf(\"%02X \", (unsigned char)data[i]);\n }\n dprintf(\"]\\n\");\n dprintf(\". [\");\n for (size_t i = 0; i < len; i++) {\n unsigned char ch = data[i];\n if (ch < ' ') {\n ch = '?';\n }\n dprintf(\"%c\", ch);\n }\n dprintf(\"]\\n\");\n}\n\nstatic int32_t read_i32(const char *data) {\n return ((uint32_t)(uint8_t)data[0] << 24) |\n ((uint32_t)(uint8_t)data[1] << 16) |\n ((uint32_t)(uint8_t)data[2] << 8) |\n ((uint32_t)(uint8_t)data[3] << 0);\n}\n\nstatic void write_i32(char *data, int32_t x) {\n data[0] = (uint8_t)(((uint32_t)x) >> 24) & 0xFF;\n data[1] = (uint8_t)(((uint32_t)x) >> 16) & 0xFF;\n data[2] = (uint8_t)(((uint32_t)x) >> 8) & 0xFF;\n data[3] = (uint8_t)(((uint32_t)x) >> 0) & 0xFF;\n}\n\nstatic int16_t read_i16(const char *data) {\n return ((uint16_t)(uint8_t)data[0] << 8) |\n ((uint16_t)(uint8_t)data[1] << 0);\n}\nstatic void write_i16(char *data, int16_t x) {\n data[0] = (uint8_t)(((uint16_t)x) >> 8) & 0xFF;\n data[1] = (uint8_t)(((uint16_t)x) >> 0) & 0xFF;\n}\n\n// parse_begin is called to begin parsing a client message.\n#define parse_begin() \\\n const char *p = data; 
\\\n const char *e = p+len; \\\n (void)args, (void)pg, (void)e;\n\n// parse_end is called when parsing client message is complete.\n// This will check that the position of the client stream matches the\n// expected lenght provided by the client. \n#define parse_end() \\\n if ((size_t)(p-data) != len) { \\\n return -1; \\\n }\n\n#define parse_cstr() ({ \\\n const char *cstr = 0; \\\n const char *s = p; \\\n while (p < e) { \\\n if (*p == '\\0') { \\\n cstr = s; \\\n p++; \\\n break; \\\n } \\\n p++; \\\n } \\\n if (!cstr) { \\\n return -1; \\\n } \\\n cstr; \\\n}) \n\n#define parse_int16() ({ \\\n if (e-p < 2) { \\\n return -1; \\\n } \\\n int16_t x = read_i16(p); \\\n p += 2; \\\n x; \\\n})\n\n#define parse_byte() ({ \\\n if (e-p < 1) { \\\n return -1; \\\n } \\\n uint8_t x = *p; \\\n p += 1; \\\n x; \\\n})\n\n#define parse_int32() ({ \\\n if (e-p < 4) { \\\n return -1; \\\n } \\\n int32_t x = read_i32(p); \\\n p += 4; \\\n x; \\\n})\n\n#define parse_bytes(n) ({ \\\n if (e-p < n) { \\\n return -1; \\\n } \\\n const void *s = p; \\\n p += (n); \\\n s; \\\n})\n\nstatic void arg_append_unescape_simplestr(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n char *str2 = xmalloc(slen+1);\n for (size_t i = 0; i < str2len; i++) {\n if (str[i] == '\\'' && str[i+1] == '\\'') {\n i++;\n }\n str2[str2len++] = str[i];\n }\n args_append(args, str2, str2len, false);\n xfree(str2);\n}\n\nstatic void pg_statement_free(struct pg_statement *statement) {\n args_free(&statement->args);\n buf_clear(&statement->argtypes);\n}\n\n\nstatic void pg_portal_free(struct pg_portal *portal) {\n args_free(&portal->params);\n}\n\nstatic void statments_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n pg_statement_free(&statement);\n }\n hashmap_free(map);\n}\n\nstatic void portals_free(struct hashmap *map) 
{
    if (!map) {
        return;
    }
    size_t i = 0;
    void *item;
    while (hashmap_iter(map, &i, &item)) {
        struct pg_portal portal;
        memcpy(&portal, item, sizeof(struct pg_portal));
        pg_portal_free(&portal);
    }
    hashmap_free(map);
}

// pg_new allocates a zeroed per-connection Postgres protocol state.
struct pg *pg_new(void) {
    struct pg *pg = xmalloc(sizeof(struct pg));
    memset(pg, 0, sizeof(struct pg));
    pg->oid = TEXTOID; // default result column type is text
    return pg;
}

// pg_free releases a pg object and everything it owns. Safe on NULL.
void pg_free(struct pg *pg) {
    if (!pg) {
        return;
    }
    xfree(pg->application_name);
    xfree(pg->database);
    xfree(pg->user);
    buf_clear(&pg->buf);
    statments_free(pg->statements);
    portals_free(pg->portals);
    args_free(&pg->targs);
    // args_free(&pg->xargs);
    xfree(pg->desc);
    xfree(pg);
}

// Hashmap callbacks: statements and portals are keyed by their name string.
// Items are memcpy'd into locals first to satisfy alignment requirements.
static uint64_t pg_statement_hash(const void *item, uint64_t seed0, 
    uint64_t seed1)
{
    struct pg_statement statement;
    memcpy(&statement, item, sizeof(struct pg_statement));
    return hashmap_murmur(statement.name, strlen(statement.name), seed0, seed1);
}

static uint64_t pg_portal_hash(const void *item, uint64_t seed0, 
    uint64_t seed1)
{
    struct pg_portal portal;
    memcpy(&portal, item, sizeof(struct pg_portal));
    return hashmap_murmur(portal.name, strlen(portal.name), seed0, seed1);
}

static int pg_statement_compare(const void *a, const void *b, void *udata) {
    (void)udata;
    struct pg_statement stmta;
    memcpy(&stmta, a, sizeof(struct pg_statement));
    struct pg_statement stmtb;
    memcpy(&stmtb, b, sizeof(struct pg_statement));
    return strcmp(stmta.name, stmtb.name);
}

static int pg_portal_compare(const void *a, const void *b, void *udata) {
    (void)udata;
    struct pg_portal portala;
    memcpy(&portala, a, sizeof(struct pg_portal));
    struct pg_portal portalb;
    memcpy(&portalb, b, sizeof(struct pg_portal));
    return strcmp(portala.name, portalb.name);
}

// portal_insert upserts a portal (lazy map creation), freeing any displaced
// portal of the same name.
static void portal_insert(struct pg *pg, struct pg_portal *portal) {
    (void)portal;
    if (!pg->portals) {
        pg->portals = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, 
            sizeof(struct pg_portal), 0, 0, 0, pg_portal_hash, 
            pg_portal_compare, 0, 0);
    }
    const void *ptr = hashmap_set(pg->portals, portal);
    if (ptr) {
        struct pg_portal old;
        memcpy(&old, ptr, sizeof(struct pg_portal));
        pg_portal_free(&old);
    }
}

// statement_insert upserts a prepared statement (lazy map creation),
// freeing any displaced statement of the same name.
static void statement_insert(struct pg *pg, struct pg_statement *stmt) {
    if (!pg->statements) {
        pg->statements = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, 
            sizeof(struct pg_statement), 0, 0, 0, pg_statement_hash, 
            pg_statement_compare, 0, 0);
    }
    const void *ptr = hashmap_set(pg->statements, stmt);
    if (ptr) {
        struct pg_statement old;
        memcpy(&old, ptr, sizeof(struct pg_statement));
        pg_statement_free(&old);
    }
}

// statement_get copies the named statement into *stmt; false when missing
// or the name is too long to have been stored.
static bool statement_get(struct pg *pg, const char *name, 
    struct pg_statement *stmt)
{
    if (!pg->statements) {
        return false;
    }
    size_t namelen = strlen(name);
    if (namelen >= PGNAMEDATALEN) {
        return false;
    }
    struct pg_statement key = { 0 };
    strcpy(key.name, name);
    const void *ptr = hashmap_get(pg->statements, &key);
    if (!ptr) {
        return false;
    }
    memcpy(stmt, ptr, sizeof(struct pg_statement));
    return true;
}

// portal_get copies the named portal into *portal; false when missing or
// the name is too long to have been stored.
static bool portal_get(struct pg *pg, const char *name, 
    struct pg_portal *portal)
{
    if (!pg->portals) {
        return false;
    }
    size_t namelen = strlen(name);
    if (namelen >= PGNAMEDATALEN) {
        return false;
    }
    struct pg_portal key = { 0 };
    strcpy(key.name, name);
    const void *ptr = hashmap_get(pg->portals, &key);
    if (!ptr) {
        return false;
    }
    memcpy(portal, ptr, sizeof(struct pg_portal));
    return true;
}

// Maps ASCII hex digits ('0'-'9','A'-'F','a'-'f') to their values; every
// other byte maps to 0.
static const uint8_t hextoks[256] = { 
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,
    0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

// decode_hex reads four hex digits (the XXXX of a \uXXXX escape) as one
// 16-bit value.
static uint32_t decode_hex(const uint8_t *str) {
    return (((int)hextoks[str[0]])<<12) | (((int)hextoks[str[1]])<<8) |
        (((int)hextoks[str[2]])<<4) | (((int)hextoks[str[3]])<<0);
}

// is_surrogate reports whether cp lies in the UTF-16 surrogate range.
// NOTE(review): bounds are exclusive (cp > 55296), so U+D800 itself is not
// flagged — this mirrors the upstream json.c code; confirm intended.
static bool is_surrogate(uint32_t cp) {
    return cp > 55296 && cp < 57344;
}

// decode_codepoint combines a UTF-16 high/low surrogate pair into a single
// codepoint, or yields U+FFFD (65533, the replacement char) when invalid.
static uint32_t decode_codepoint(uint32_t cp1, uint32_t cp2) {
    return cp1 > 55296 && cp1 < 56320 && cp2 > 56320 && cp2 < 57344 ?
        ((cp1 - 55296) << 10) | ((cp2 - 56320) + 65536) :
        65533;
}

// encode_codepoint writes cp as UTF-8 into dst (up to 4 bytes) and returns
// the number of bytes written. Out-of-range and surrogate codepoints are
// replaced with U+FFFD.
static inline int encode_codepoint(uint8_t dst[], uint32_t cp) {
    if (cp < 128) {
        dst[0] = cp;
        return 1;
    } else if (cp < 2048) {
        dst[0] = 192 | (cp >> 6);
        dst[1] = 128 | (cp & 63);
        return 2;
    } else if (cp > 1114111 || is_surrogate(cp)) {
        cp = 65533; // error codepoint
    }
    if (cp < 65536) {
        dst[0] = 224 | (cp >> 12);
        dst[1] = 128 | ((cp >> 6) & 63);
        dst[2] = 128 | (cp & 63);
        return 3;
    }
    dst[0] = 240 | (cp >> 18);
    dst[1] = 128 | ((cp >> 12) & 63);
    dst[2] = 128 | ((cp >> 6) & 63);
    dst[3] = 128 | (cp & 63);
    return 4;
}

// for_each_utf8 iterates over each UTF-8 byte in jstr, unescaping along the
// way. 'f' is a loop expression that will make available the 'ch' char which 
// is just a single byte in a UTF-8 series.
// this is taken from https://github.com/tidwall/json.c
#define for_each_utf8(jstr, len, f) { \
    size_t nn = (len); \
    int ch = 0; \
    (void)ch; \
    for (size_t ii = 0; ii < nn; ii++) { \
        if ((jstr)[ii] != '\\') { \
            ch = (jstr)[ii]; \
            if (1) f \
            continue; \
        }; \
        ii++; \
        if (ii == nn) break; \
        switch ((jstr)[ii]) { \
        case '\\': ch = '\\'; break; \
        case '/' : ch = '/'; break; \
        case 'b' : ch = '\b'; break; \
        case 'f' : ch = '\f'; break; \
        case 'n' : ch = '\n'; break; \
        case 'r' : ch = '\r'; break; \
        case 't' : ch = '\t'; break; \
        case '"' : ch = '"'; break; \
        case 'u' : \
            if (ii+5 > nn) { nn = 0; continue; }; \
            uint32_t cp = decode_hex((jstr)+ii+1); \
            ii += 5; \
            if (is_surrogate(cp)) { \
                if (nn-ii >= 6 && (jstr)[ii] == '\\' && (jstr)[ii+1] == 'u') { \
                    cp = decode_codepoint(cp, decode_hex((jstr)+ii+2)); \
                    ii += 6; \
                } \
            } \
            uint8_t _bytes[4]; \
            int _n = encode_codepoint(_bytes, cp); \
            for (int _j = 0; _j < _n; _j++) { \
                ch = _bytes[_j]; \
                if (1) f \
            } \
            ii--; \
            continue; \
        default: \
            continue; \
        }; \
        if (1) f \
    } \
}

// arg_append_unescape_str appends an E'...'-style escaped string to args
// after resolving backslash escapes via for_each_utf8.
static void arg_append_unescape_str(struct args *args, const char *str,
    size_t slen)
{
    size_t str2len = 0;
    uint8_t *str2 = xmalloc(slen+1);
    for_each_utf8((uint8_t*)str, slen, {
        str2[str2len++] = ch;
    });
    args_append(args, (char*)str2, str2len, false);
    xfree(str2);
}

// Very simple map to stores all params numbers.
// Open-addressed set of nonzero uint16 parameter numbers; starts on the
// small inline 'def' array and grows to the heap as needed.
struct pmap {
    int count;
    int nbuckets;
    uint16_t *buckets;
    uint16_t def[8];
};

static void pmap_init(struct pmap *map) {
    memset(map, 0, sizeof(struct pmap));
    map->nbuckets = sizeof(map->def)/sizeof(uint16_t);
    map->buckets = map->def;
}

// pmap_free releases heap buckets; the inline array needs no free.
static void pmap_free(struct pmap *map) {
    if (map->buckets != map->def) {
        xfree(map->buckets);
    }
}

// pmap_insert0 linear-probes for an empty slot (0 marks empty; param is
// never 0). Caller guarantees the table has room.
static void pmap_insert0(uint16_t *buckets, int nbuckets, uint16_t param) {
    uint16_t hash = mix13(param);
    int i = hash%nbuckets;
    while (1) {
        if (buckets[i] == 0) {
            buckets[i] = param;
            return;
        }
        i = (i+1)%nbuckets;
    }
}

// pmap_grow doubles the bucket array and rehashes every entry.
static void pmap_grow(struct pmap *map) {
    int nbuckets2 = map->nbuckets*2;
    uint16_t *buckets2 = xmalloc(nbuckets2*sizeof(uint16_t));
    memset(buckets2, 0, nbuckets2*sizeof(uint16_t));
    for (int i = 0; i < map->nbuckets; i++) {
        if (map->buckets[i]) {
            pmap_insert0(buckets2, nbuckets2, map->buckets[i]);
        }
    }
    if (map->buckets != map->def) {
        xfree(map->buckets);
    }
    map->buckets = buckets2;
    map->nbuckets = nbuckets2;
}

// pmap_insert adds param (must be nonzero), growing at 75% load.
static void pmap_insert(struct pmap *map, uint16_t param) {
    assert(param != 0);
    if (map->count == (map->nbuckets>>1)+(map->nbuckets>>2)) {
        pmap_grow(map);
    }
    pmap_insert0(map->buckets, map->nbuckets, param);
    map->count++;
}

// pmap_exists reports whether param was inserted.
static bool pmap_exists(struct pmap *map, uint16_t param) {
    uint16_t hash = 
mix13(param);
    int i = hash%map->nbuckets;
    while (1) {
        if (map->buckets[i] == 0) {
            return false;
        }
        if (map->buckets[i] == param) {
            return true;
        }
        i = (i+1)%map->nbuckets;
    }
}

// parse_query_args tokenizes a restricted SQL-ish query into args:
// keywords, simple/escaped strings, and $N parameters. Identifiers and
// dollar-quoted strings are rejected. On success *nparams is the number of
// distinct parameters, which must be exactly $1..$N with no gaps. When
// argtypes is provided, one type byte is appended per arg ('A' = atom,
// 'P' = parameter; +1 when joined to the previous token with no space).
static bool parse_query_args(const char *query, struct args *args, 
    int *nparams, struct buf *argtypes)
{
    dprintf("parse_query: [%s]\n", query);
    struct pmap pmap;
    pmap_init(&pmap);

    // loop through each keyword
    while (isspace(*query)) {
        query++;
    }
    bool ok = false;
    bool esc = false;
    const char *str;
    const char *p = query;
    bool join = false;
    while (*p) {
        switch (*p) {
        case ';':
            goto break_while;
        case '\"':
            // identifier
            parse_errorf("idenifiers not allowed");
            goto done;
        case '\'':
            // simple string
            p++;
            str = p;
            esc = false;
            // Scan to the closing quote; '' is an escaped quote.
            while (*p) {
                if (*p == '\'') {
                    if (*(p+1) == '\'') {
                        esc = true;
                        p += 2;
                        continue;
                    }
                    break;
                }
                p++;
            }
            if (*p != '\'') {
                parse_errorf("unterminated quoted string");
                goto done;
            }
            size_t slen = p-str;
            if (!esc) {
                args_append(args, str, slen, true);
            } else {
                arg_append_unescape_simplestr(args, str, slen);
            }
            if (argtypes) {
                buf_append_byte(argtypes, 'A'+join);
                join = *(p+1) && !isspace(*(p+1));
            }
            break;
        case '$':
            // dollar-quote or possible param
            if (*(p+1) >= '0' && *(p+1) <= '9') {
                char *e = 0;
                long param = strtol(p+1, &e, 10);
                if (param == 0 || param > 0xFFFF) {
                    parse_errorf("there is no parameter $%ld", param);
                    goto done;
                }
                pmap_insert(&pmap, param);
                args_append(args, p, e-p, true);
                if (argtypes) {
                    buf_append_byte(argtypes, 'P'+join);
                    join = *e && !isspace(*e);
                }
                p = e;
                continue;
            }
            // dollar-quote strings not
            parse_errorf("dollar-quote strings not allowed");
            goto done;
        case 'E': case 'e':
            if (*(p+1) == '\'') {
                // escaped string
                p += 2;
                str = p;
                // Find the closing quote; a quote preceded by an odd run of
                // backslashes is escaped.
                while (*p) {
                    if (*p == '\\') {
                        esc = true;
                    } else if (*p == '\'') {
                        size_t x = 0;
                        while (*(p-x-1) == '\\') {
                            x++;
                        }
                        if ((x%2)==0) {
                            break;
                        }
                    }
                    p++;
                }
                if (*p != '\'') {
                    parse_errorf("unterminated quoted string");
                    goto done;
                }
                size_t slen = p-str;
                if (!esc) {
                    args_append(args, str, slen, true);
                } else {
                    arg_append_unescape_str(args, str, slen);
                }
                if (argtypes) {
                    buf_append_byte(argtypes, 'A'+join);
                    join = *(p+1) && !isspace(*(p+1));
                }
                break;
            }
            // fallthrough
        default:
            if (isspace(*p)) {
                p++;
                continue;
            }
            // keyword
            const char *keyword = p;
            while (*p && !isspace(*p)) {
                if (*p == ';' || *p == '\'' || *p == '\"' || *p == '$') {
                    break;
                }
                p++;
            }
            size_t keywordlen = p-keyword;
            args_append(args, keyword, keywordlen, true);
            if (argtypes) {
                buf_append_byte(argtypes, 'A'+join);
                join = *p && !isspace(*p);
            }
            while (isspace(*p)) {
                p++;
            }
            continue;
        }
        p++;
    }
break_while:
    // Only trailing semicolons may follow the statement.
    while (*p) {
        if (*p != ';') {
            parse_errorf("unexpected characters at end of query");
            goto done;
        }
        p++;
    }
    ok = true;
done:
    if (ok) {
        // check params: every $1..$count must be present (no gaps).
        for (int i = 0; i < pmap.count; i++) {
            if (!pmap_exists(&pmap, i+1)) {
                parse_errorf("missing parameter $%d", i+1);
                ok = false;
                break;
            }
        }
    }
    *nparams = pmap.count;
    pmap_free(&pmap);
    if (argtypes) {
        buf_append_byte(argtypes, '\0');
    }
    return ok;
}

// parse_cache_query_args trims leading space, then tokenizes via
// parse_query_args (with debug dumps when PGDEBUG is enabled).
static bool parse_cache_query_args(const char *query, struct args *args,
    int *maxparam, struct buf *argtypes)
{
    while (isspace(*query)) {
        query++;
    }
    if (!parse_query_args(query, args, maxparam, argtypes)) {
        return false;
    }
#ifdef PGDEBUG
    args_print(args);
#endif
    if (argtypes) {
        dprintf("argtypes: [%s]\n", argtypes->data);
    }
    return true;
}

// parseQ handles the simple-protocol Query ('Q') message: a single query
// string that must not contain $N parameters.
static size_t parseQ(const char *data, size_t len, struct args *args, 
    struct pg *pg)
{
    // Query
    dprintf(">>> Query\n");
    parse_begin();
    const char *query = parse_cstr();
    parse_end();
    int nparams = 0;
    bool pok = parse_cache_query_args(query, args, &nparams, 0);
    if (!pok) {
        pg->error = 1;
        args_clear(args);
        return len;
    }
    if (nparams > 0) {
        parse_seterror("query cannot have parameters");
        pg->error = 1;
        args_clear(args);
        return len;
    }
    if (args->len == 0) {
        pg->empty_query = 1;
    }
    return len;
}

// parseP handles the extended-protocol Parse ('P') message: tokenizes the
// query and stores it as a named prepared statement. Declared parameter
// type OIDs are read and ignored.
static size_t parseP(const char *data, size_t len, struct args *args, 
    struct pg *pg)
{
    // Parse
    dprintf("<<< Parse\n");
    // print_packet(data, len);
    parse_begin();
    const char *stmt_name = parse_cstr();
    const char *query = parse_cstr();
    uint16_t num_param_types = parse_int16();
    // dprintf(". Parse [%s] [%s] [%d]\n", stmt_name, query,
    //     (int)num_param_types);
    for (uint16_t i = 0; i < num_param_types; i++) {
        int32_t param_type = parse_int32();
        (void)param_type;
        // dprintf(". [%d]\n", param_type);
    }
    parse_end();
    if (strlen(stmt_name) >= PGNAMEDATALEN) {
        parse_seterror("statement name too large");
        pg->error = 1;
        return len;
    }
    int nparams = 0;
    struct buf argtypes = { 0 };
    bool ok = parse_cache_query_args(query, args, &nparams, &argtypes);
    if (!ok) {
        pg->error = 1;
        args_clear(args);
        buf_clear(&argtypes);
        return len;
    }
    // copy over last statement
    struct pg_statement stmt = { 0 };
    strcpy(stmt.name, stmt_name);
    stmt.nparams = nparams;
    // copy over parsed args
    for (size_t i = 0; i < args->len; i++) {
        args_append(&stmt.args, args->bufs[i].data, args->bufs[i].len, false);
    }
    args_clear(args);
    stmt.argtypes = argtypes; // ownership transfers to the statement
    statement_insert(pg, &stmt);
    pg->parse = 1;
    return len;
}

// parseD handles the Describe ('D') message, pre-building the response
// ('T' row description for portals, 't' parameter description for
// statements) into pg->desc.
static size_t parseD(const char *data, size_t len, struct args *args, 
    struct pg *pg)
{
    // Describe
    dprintf("<<< Describe\n");
    if (pg->describe) {
        // Already has a describe in a sequence
        pg->error = 1;
        parse_errorf("double describe not allowed");
        return -1;
    }
    // print_packet(data, len);
    parse_begin();
    uint8_t type = parse_byte();
    const char *name = parse_cstr();
    parse_end();

    dprintf(". 
Describe [%c] [%s]\\n\", type, name);\n if (type == 'P' || type == 'P'+1) {\n struct pg_portal portal;\n if (!portal_get(pg, name, &portal)) {\n parse_errorf(\"portal not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('T')\n // Int32 length\n // Int16 field_count\n // Field[] fields\n // all fields are unnamed text\n char field[] = { \n 0x00, // \"\\0\" (field name)\n 0x00, 0x00, 0x00, 0x00, // table_oid = 0\n 0x00, 0x00, // column_attr_no = 0\n 0x00, 0x00, 0x00, pg->oid, // type_oid = 25 (text)\n 0xFF, 0xFF, // type_size = -1\n 0xFF, 0xFF, 0xFF, 0xFF, // type_modifier = -1\n 0x00, 0x00, // format_code = 0 (text)\n };\n static_assert(sizeof(field) == 19, \"\");\n size_t size = 1+4+2+portal.params.len*sizeof(field);\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 'T';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, portal.params.len);\n p1 += 2;\n for (size_t i = 0; i < portal.params.len; i++) {\n memcpy(p1, field, sizeof(field));\n p1 += sizeof(field);\n }\n pg->desclen = size;\n return len;\n }\n\n if (type == 'S') {\n struct pg_statement stmt;\n if (!statement_get(pg, name, &stmt)) {\n parse_errorf(\"statement not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('t')\n // Int32 length\n // Int16 num_params\n // Int32[] param_type_oids\n size_t size = 1+4+2+stmt.nparams*4;\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 't';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, stmt.nparams);\n p1 += 2;\n for (int i = 0; i < stmt.nparams; i++) {\n write_i32(p1, pg->oid);\n p1 += 4;\n }\n pg->desclen = size;\n pg->describe = 1;\n return len;\n }\n parse_errorf(\"unsupported describe type '%c'\", type);\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseB(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n\n // Bind\n dprintf(\"<<< 
Bind\\n\");\n\n // print_packet(data, len);\n\n // X Byte1('B') # Bind message identifier\n // X Int32 length # Message length including self\n //\n // String portal_name # Destination portal (\"\" = unnamed)\n // String statement_name # Prepared statement name (from Parse)\n // Int16 num_format_codes # 0 = all text, 1 = one for all, or N\n // [Int16] format_codes # 0 = text, 1 = binary\n // Int16 num_parameters\n // [parameter values]\n // Int16 num_result_formats\n // [Int16] result_format_codes\n\n parse_begin();\n const char *portal_name = parse_cstr();\n const char *stmt_name = parse_cstr();\n int num_formats = parse_int16();\n for (int i = 0; i < num_formats; i++) {\n int format = parse_int16();\n if (format != 0 && format != 1) {\n parse_errorf(\"only text or binary format allowed\");\n pg->error = 1;\n return len;\n }\n }\n uint16_t num_params = parse_int16();\n args_clear(&pg->targs);\n for (int i = 0; i < num_params; i++) {\n int32_t len = parse_int32();\n if (len <= 0) {\n // Nulls are empty strings\n len = 0;\n }\n const char *b = parse_bytes(len);\n args_append(&pg->targs, b, len, false);\n }\n // ignore result formats\n uint16_t num_result_formats = parse_int16();\n for (int i = 0; i < num_result_formats; i++) {\n int result_format_codes = parse_int16();\n (void)result_format_codes;\n }\n parse_end();\n\n if (strlen(portal_name) >= PGNAMEDATALEN) {\n parse_seterror(\"portal name too large\");\n pg->error = 1;\n return len;\n }\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n struct pg_portal portal = { 0 };\n strcpy(portal.name, portal_name);\n strcpy(portal.stmt, stmt_name);\n memcpy(&portal.params, &pg->targs, sizeof(struct args));\n memset(&pg->targs, 0, sizeof(struct args));\n portal_insert(pg, &portal);\n pg->bind = 1;\n return len;\n}\n\nstatic size_t parseX(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Close\n 
dprintf(\"<<< Close\\n\");\n parse_begin();\n parse_end();\n pg->close = 1;\n return len;\n}\n\nstatic size_t parseE(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Execute\n dprintf(\"<<< Execute\\n\");\n parse_begin();\n const char *portal_name = parse_cstr();\n size_t max_rows = parse_int32();\n parse_end();\n struct pg_portal portal;\n if (!portal_get(pg, portal_name, &portal)) {\n parse_seterror(\"portal not found\");\n pg->error = 1;\n return len;\n }\n struct pg_statement stmt;\n if (!statement_get(pg, portal.stmt, &stmt)) {\n parse_seterror(\"statement not found\");\n pg->error = 1;\n return len;\n }\n if ((size_t)stmt.nparams != portal.params.len) {\n parse_seterror(\"portal params mismatch\");\n pg->error = 1;\n return len;\n }\n // ignore max_rows\n (void)max_rows;\n\n // \n args_clear(&pg->targs);\n for (size_t i = 0; i < stmt.args.len; i++) {\n const char *arg = stmt.args.bufs[i].data;\n size_t arglen = stmt.args.bufs[i].len;\n char atype = stmt.argtypes.data[i];\n dprintf(\"[%.*s] [%c]\\n\", (int)arglen, arg, atype);\n bool join = false;\n switch (atype) {\n case 'A'+1:\n atype = 'A';\n join = true;\n break;\n case 'P':\n join = false;\n break;\n case 'P'+1:\n atype = 'P';\n join = true;\n break;\n }\n if (atype == 'P') {\n if (arglen == 0 || arg[0] != '$') {\n goto internal_error;\n }\n uint64_t x;\n bool ok = parse_u64(arg+1, arglen-1, &x);\n if (!ok || x == 0 || x > 0xFFFF) {\n goto internal_error;\n }\n size_t paramidx = x-1;\n if (paramidx >= portal.params.len) {\n goto internal_error;\n }\n arg = portal.params.bufs[paramidx].data;\n arglen = portal.params.bufs[paramidx].len;\n }\n if (join) {\n assert(pg->targs.len > 0);\n buf_append(&pg->targs.bufs[pg->targs.len-1], arg, arglen);\n } else {\n args_append(&pg->targs, arg, arglen, false);\n }\n }\n\n struct args swapargs = *args;\n *args = pg->targs;\n pg->targs = swapargs;\n\n#ifdef PGDEBUG\n args_print(args);\n#endif\n\n pg->execute = 1;\n 
return len;\ninternal_error:\n parse_seterror(\"portal params internal error\");\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseS(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args;\n // Sync\n dprintf(\"<<< Sync\\n\");\n // print_packet(data, len);\n parse_begin();\n parse_end();\n pg->sync = 1;\n return len;\n}\n\nstatic size_t parsep(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // PasswordMessage\n parse_begin();\n const char *password = parse_cstr();\n parse_end();\n if (strcmp(password, auth) != 0) {\n parse_seterror(\n \"WRONGPASS invalid username-password pair or user is disabled.\");\n return -1;\n }\n pg->auth = 1;\n return len;\n}\n\nstatic ssize_t parse_message(const char *data, size_t len, struct args *args,\n struct pg *pg)\n{\n if (len < 5) {\n return 0;\n }\n int msgbyte = data[0];\n size_t msglen = read_i32(data+1);\n if (len < msglen+1) {\n return 0;\n }\n msglen -= 4;\n data += 5;\n ssize_t ret;\n switch (msgbyte) {\n case 'Q':\n ret = parseQ(data, msglen, args, pg);\n break;\n case 'P':\n ret = parseP(data, msglen, args, pg);\n break;\n case 'X':\n ret = parseX(data, msglen, args, pg);\n break;\n case 'E':\n ret = parseE(data, msglen, args, pg);\n break;\n case 'p': // lowercase\n ret = parsep(data, msglen, args, pg);\n break;\n case 'D':\n ret = parseD(data, msglen, args, pg);\n break;\n case 'B':\n ret = parseB(data, msglen, args, pg);\n break;\n case 'S':\n ret = parseS(data, msglen, args, pg);\n break;\n default:\n pg->error = 1;\n parse_errorf(\"unknown message '%c'\", msgbyte);\n ret = msglen;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+5;\n}\n\nstatic ssize_t parse_magic_ssl(const char *data, size_t len, struct pg *pg) {\n (void)data;\n // SSLRequest\n pg->ssl = 1;\n return len;\n}\n\nstatic ssize_t parse_magic_proto3(const char *data, size_t len, struct pg *pg) {\n // StartupMessage\n const char *p = (void*)data;\n const char *e = 
p+len;\n // Read parameters\n const char *user = \"\";\n const char *database = \"\";\n const char *application_name = \"\";\n const char *client_encoding = \"\";\n const char *name = 0;\n const char *s = (char*)p;\n while (p < e) {\n if (*p == '\\0') {\n if (s != p) {\n if (name) {\n if (strcmp(name, \"database\") == 0) {\n database = s;\n } else if (strcmp(name, \"application_name\") == 0) {\n application_name = s;\n } else if (strcmp(name, \"client_encoding\") == 0) {\n client_encoding = s;\n } else if (strcmp(name, \"user\") == 0) {\n user = s;\n }\n name = 0;\n } else {\n name = s;\n }\n }\n s = p+1;\n }\n p++;\n }\n // dprintf(\". database=%s, application_name=%s, client_encoding=%s, \"\n // \"user=%s\\n\", database, application_name, client_encoding, user);\n if (*client_encoding && strcmp(client_encoding, \"UTF8\") != 0) {\n printf(\"# Invalid Postgres client_encoding (%s)\\n\",\n client_encoding);\n return -1;\n }\n pg->user = xmalloc(strlen(user)+1);\n strcpy((char*)pg->user, user);\n pg->database = xmalloc(strlen(database)+1);\n strcpy((char*)pg->database, database);\n pg->application_name = xmalloc(strlen(application_name)+1);\n strcpy((char*)pg->application_name, application_name);\n pg->startup = 1;\n return p-data;\n}\n\nstatic ssize_t parse_magic_cancel(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n parse_errorf(\"cancel message unsupported\");\n return -1;\n}\n\nstatic ssize_t parse_magic(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n if (len < 4) {\n return 0;\n }\n size_t msglen = read_i32(data);\n if (msglen > 65536) {\n parse_errorf(\"message too large\");\n return -1;\n }\n if (len < msglen) {\n return 0;\n }\n if (msglen < 8) {\n parse_errorf(\"invalid message\");\n return -1;\n }\n // dprintf(\"parse_magic\\n\");\n uint32_t magic = read_i32(data+4);\n data += 8;\n msglen -= 8;\n ssize_t ret;\n switch (magic) {\n case 0x04D2162F: \n ret = parse_magic_ssl(data, msglen, 
pg);\n break;\n case 0x00030000: \n ret = parse_magic_proto3(data, msglen, pg);\n break;\n case 0xFFFF0000: \n ret = parse_magic_cancel(data, msglen, pg);\n break;\n default:\n parse_errorf(\"Protocol error: unknown magic number %08x\", magic);\n ret = -1;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+8;\n}\n\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pgptr)\n{\n (void)print_packet;\n // print_packet(data, len);\n struct pg *pg = *pgptr;\n if (!pg) {\n pg = pg_new();\n *pgptr = pg;\n }\n pg->error = 0;\n if (len == 0) {\n return 0;\n }\n if (data[0] == 0) {\n return parse_magic(data, len, pg);\n }\n return parse_message(data, len, args, pg);\n}\n\nvoid pg_write_auth(struct conn *conn, unsigned char code) {\n unsigned char bytes[] = { \n 'R', 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n}\n\nvoid pg_write_ready(struct conn *conn, unsigned char code) {\n if (!pg_execute(conn)) {\n unsigned char bytes[] = { \n 'Z', 0x0, 0x0, 0x0, 0x5, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n }\n}\n\nvoid pg_write_status(struct conn *conn, const char *key, const char *val) {\n size_t keylen = strlen(key);\n size_t vallen = strlen(val);\n int32_t size = 4+keylen+1+vallen+1;\n char *bytes = xmalloc(1+size);\n bytes[0] = 'S';\n write_i32(bytes+1, size);\n memcpy(bytes+1+4,key,keylen+1);\n memcpy(bytes+1+4+keylen+1,val,vallen+1);\n conn_write_raw(conn, bytes, 1+size);\n xfree(bytes);\n}\n\nvoid pg_write_row_desc(struct conn *conn, const char **fields, int nfields){\n size_t size = 1+4+2;\n for (int i = 0; i < nfields; i++) {\n size += strlen(fields[i])+1;\n size += 4+2+4+2+4+2;\n }\n int oid = conn_pg(conn)->oid;\n char *bytes = xmalloc(size);\n bytes[0] = 'T';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, nfields); // field_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < nfields; i++) {\n size_t fsize = 
strlen(fields[i]);\n memcpy(p, fields[i], fsize+1);\n p += fsize+1;\n write_i32(p, 0); // table_oid\n p += 4;\n write_i16(p, 0); // column_attr_number\n p += 2;\n write_i32(p, oid); // type_oid\n p += 4;\n write_i16(p, -1); // type_size\n p += 2;\n write_i32(p, -1); // type_modifier\n p += 4;\n write_i16(p, 1); // format_code\n p += 2;\n }\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_row_data(struct conn *conn, const char **cols, \n const size_t *collens, int ncols)\n{\n size_t size = 1+4+2;\n for (int i = 0; i < ncols; i++) {\n size += 4+collens[i];\n }\n char *bytes = xmalloc(size);\n bytes[0] = 'D';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, ncols); // column_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < ncols; i++) {\n write_i32(p, collens[i]); // column_length\n p += 4;\n#ifdef PGDEBUG\n printf(\" ROW >>>> len:%zu [\", collens[i]);\n binprint(cols[i], collens[i]);\n printf(\"]\\n\");\n#endif\n memcpy(p, cols[i], collens[i]); // column_data\n p += collens[i];\n }\n \n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_complete(struct conn *conn, const char *tag){\n size_t taglen = strlen(tag);\n size_t size = 1+4+taglen+1;\n char *bytes = xmalloc(size);\n bytes[0] = 'C';\n write_i32(bytes+1, size-1); // message_size\n memcpy(bytes+1+4, tag, taglen+1);\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_completef(struct conn *conn, const char *tag_format, ...){\n // initializing list pointer\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_complete(conn, tag);\n}\n\nvoid pg_write_simple_row_data_ready(struct conn *conn, const char *desc,\n const void *row, size_t len, const char *tag)\n{\n pg_write_row_desc(conn, (const char*[]){ desc }, 1);\n pg_write_row_data(conn, (const char*[]){ row }, (size_t[]){ len }, 1);\n pg_write_complete(conn, tag);\n pg_write_ready(conn, 
'I');\n}\n\nvoid pg_write_simple_row_str_ready(struct conn *conn, const char *desc,\n const char *row, const char *tag)\n{\n pg_write_simple_row_data_ready(conn, desc, row, strlen(row), tag);\n}\n\nvoid pg_write_simple_row_i64_ready(struct conn *conn, const char *desc,\n int64_t row, const char *tag)\n{\n char val[32];\n snprintf(val, sizeof(val), \"%\" PRIi64, row);\n pg_write_simple_row_str_ready(conn, desc, val, tag);\n}\n\nvoid pg_write_simple_row_str_readyf(struct conn *conn, const char *desc,\n const char *row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_str_ready(conn, desc, row, tag);\n}\n\nvoid pg_write_simple_row_i64_readyf(struct conn *conn, const char *desc,\n int64_t row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_i64_ready(conn, desc, row, tag);\n}\n\nstatic void write_auth_ok(struct conn *conn, struct pg *pg) {\n // dprintf(\">> AuthOK\\n\");\n pg_write_auth(conn, 0); // AuthOK;\n // startup message received, respond\n pg_write_status(conn, \"client_encoding\", \"UTF8\");\n pg_write_status(conn, \"server_encoding\", \"UTF8\");\n char status[128];\n snprintf(status, sizeof(status), \"%s (Pogocache)\", version);\n pg_write_status(conn, \"server_version\", status);\n pg_write_ready(conn, 'I'); // Idle;\n pg->ready = 1;\n}\n\n// Respond to various the connection states.\n// Returns true if the all responses complete or false if there was an\n// error.\nbool pg_respond(struct conn *conn, struct pg *pg) {\n if (pg->error) {\n conn_write_error(conn, parse_lasterror());\n return true;\n }\n if (pg->empty_query) {\n dprintf(\"====== pg_respond(pg->empty_query) =====\\n\");\n conn_write_raw(conn, \"I\\0\\0\\0\\4\", 5);\n conn_write_raw(conn, \"Z\\0\\0\\0\\5I\", 6);\n pg->empty_query = 0;\n return 
true;\n }\n if (pg->parse) {\n dprintf(\"====== pg_respond(pg->parse) =====\\n\");\n conn_write_raw(conn, \"1\\0\\0\\0\\4\", 5);\n pg->parse = 0;\n return true;\n }\n if (pg->bind) {\n dprintf(\"====== pg_respond(pg->bind) =====\\n\");\n conn_write_raw(conn, \"2\\0\\0\\0\\4\", 5);\n pg->bind = 0;\n return true;\n }\n if (pg->describe) {\n dprintf(\"====== pg_respond(pg->describe) =====\\n\");\n assert(pg->desc);\n conn_write_raw(conn, pg->desc, pg->desclen);\n xfree(pg->desc);\n pg->desc = 0;\n pg->desclen = 0;\n pg->describe = 0;\n return true;\n }\n if (pg->sync) {\n dprintf(\"====== pg_respond(pg->sync) =====\\n\");\n pg->execute = 0;\n pg_write_ready(conn, 'I');\n pg->sync = 0;\n return true;\n }\n if (pg->close) {\n dprintf(\"====== pg_respond(pg->close) =====\\n\");\n pg->close = 0;\n return false;\n }\n if (pg->ssl == 1) {\n if (!conn_istls(conn)) {\n conn_write_raw_cstr(conn, \"N\");\n } else {\n conn_write_raw_cstr(conn, \"Y\");\n }\n pg->ssl = 0;\n return true;\n }\n if (pg->auth == 1) {\n if (pg->startup == 0) {\n return false;\n }\n conn_setauth(conn, true);\n write_auth_ok(conn, pg);\n pg->auth = 0;\n return true;\n }\n if (pg->startup == 1) {\n if (auth && *auth) {\n pg_write_auth(conn, 3); // AuthenticationCleartextPassword;\n } else {\n write_auth_ok(conn, pg);\n pg->startup = 0;\n }\n return true;\n }\n return true;\n}\n\nvoid pg_write_error(struct conn *conn, const char *msg) {\n size_t msglen = strlen(msg);\n size_t size = 1+4;\n size += 1+5+1; // 'S' \"ERROR\" \\0\n size += 1+5+1; // 'V' \"ERROR\" \\0\n size += 1+5+1; // 'C' \"23505\" \\0\n size += 1+msglen+1; // 'M' msg \\0\n size += 1; // null-terminator\n char *bytes = xmalloc(size);\n bytes[0] = 'E';\n write_i32(bytes+1, size-1);\n char *p = bytes+1+4;\n memcpy(p, \"SERROR\", 7);\n p += 7;\n memcpy(p, \"VERROR\", 7);\n p += 7;\n memcpy(p, \"C23505\", 7);\n p += 7;\n p[0] = 'M';\n p++;\n memcpy(p, msg, msglen+1);\n p += msglen+1;\n p[0] = '\\0';\n conn_write_raw(conn, bytes, size);\n 
xfree(bytes);\n}\n\n// return true if the command need further execution, of false if this\n// operation handled it already\nbool pg_precommand(struct conn *conn, struct args *args, struct pg *pg) {\n#ifdef PGDEBUG\n printf(\"precommand: \");\n args_print(args);\n#endif\n if (args->len > 0 && args->bufs[0].len > 0) {\n char c = tolower(args->bufs[0].data[0]);\n if (c == 'b' || c == 'r' || c == 'c') {\n // silently ignore transaction commands.\n if (c == 'b' && argeq(args, 0, \"begin\")) {\n pg_write_completef(conn, \"BEGIN\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"rollback\")) {\n pg_write_completef(conn, \"ROLLBACK\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"commit\")) {\n pg_write_completef(conn, \"COMMIT\");\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n if (c == ':' && args->bufs[0].len > 1 && args->bufs[0].data[1] == ':') {\n if (argeq(args, 0, \"::bytea\") || argeq(args, 0, \"::bytes\")) {\n pg->oid = BYTEAOID;\n } else if (argeq(args, 0, \"::text\")) {\n pg->oid = TEXTOID;\n } else {\n char err[128];\n snprintf(err, sizeof(err), \"unknown type '%.*s'\", \n (int)(args->bufs[0].len-2), args->bufs[0].data+2);\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n return false;\n }\n args_remove_first(args);\n if (args->len == 0) {\n if (pg->oid == BYTEAOID) {\n pg_write_completef(conn, \"BYTEA\");\n } else {\n pg_write_completef(conn, \"TEXT\");\n }\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n }\n return true;\n}\n"], ["/pogocache/src/http.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit http.c provides the parser for the HTTP wire protocol.\n#define _GNU_SOURCE \n#include \n#include \n#include \n#include \n#include \"stats.h\"\n#include \"util.h\"\n#include \"parse.h\"\n\nextern const bool useauth;\nextern const char *auth;\n\nbool http_valid_key(const char *key, size_t len) {\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] >= 0x7F || key[i] == '%' || key[i] == '+' ||\n key[i] == '@' || key[i] == '$' || key[i] == '?' || key[i] == '=') \n {\n return false;\n }\n }\n return true;\n}\n\nssize_t parse_http(const char *data, size_t len, struct args *args, \n int *httpvers, bool *keepalive)\n{\n *keepalive = false;\n *httpvers = 0;\n const char *method = 0;\n size_t methodlen = 0;\n const char *uri = 0;\n size_t urilen = 0;\n int proto = 0;\n const char *hdrname = 0; \n size_t hdrnamelen = 0;\n const char *hdrval = 0;\n size_t hdrvallen = 0;\n size_t bodylen = 0;\n bool nocontentlength = true;\n bool html = false;\n const char *authhdr = 0;\n size_t authhdrlen = 0;\n const char *p = data;\n const char *e = p+len;\n const char *s = p;\n while (p < e) {\n if (*p == ' ') {\n method = s;\n methodlen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == ' ') {\n uri = s;\n urilen = p-s;\n p++;\n break;\n }\n if (*p == '\\n') {\n goto badreq;\n }\n p++;\n }\n s = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n if (p-s-1 != 8 || !bytes_const_eq(s, 5, \"HTTP/\") || \n s[5] < '0' || s[5] > '9' || s[6] != '.' 
|| \n s[7] < '0' || s[7] > '9')\n {\n goto badproto;\n }\n proto = (s[5]-'0')*10+(s[7]-'0');\n if (proto < 9 || proto >= 30) {\n goto badproto;\n }\n if (proto >= 11) {\n *keepalive = true;\n }\n *httpvers = proto;\n p++;\n goto readhdrs;\n }\n \n p++;\n }\n goto badreq;\nreadhdrs:\n // Parse the headers, pulling the pairs along the way.\n while (p < e) {\n hdrname = p;\n while (p < e) {\n if (*p == ':') {\n hdrnamelen = p-hdrname;\n p++;\n while (p < e && *p == ' ') {\n p++;\n }\n hdrval = p;\n while (p < e) {\n if (*p == '\\n') {\n if (*(p-1) != '\\r') {\n goto badreq;\n }\n hdrvallen = p-hdrval-1;\n // printf(\"[%.*s]=[%.*s]\\n\", (int)hdrnamelen, hdrname,\n // (int)hdrvallen, hdrval);\n // We have a new header pair (hdrname, hdrval);\n if (argeq_bytes(hdrname, hdrnamelen, \"content-length\")){\n uint64_t x;\n if (!parse_u64(hdrval, hdrvallen, &x) || \n x > MAXARGSZ)\n {\n stat_store_too_large_incr(0);\n goto badreq;\n }\n bodylen = x;\n nocontentlength = false;\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"connection\"))\n {\n *keepalive = argeq_bytes(hdrval, hdrvallen, \n \"keep-alive\");\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"accept\"))\n {\n if (memmem(hdrval, hdrvallen, \"text/html\", 9) != 0){\n html = true;\n }\n } else if (argeq_bytes(hdrname, hdrnamelen,\n \"authorization\"))\n {\n authhdr = hdrval;\n authhdrlen = hdrvallen;\n }\n p++;\n if (p < e && *p == '\\r') {\n p++;\n if (p < e && *p == '\\n') {\n p++;\n } else {\n goto badreq;\n }\n goto readbody;\n }\n break;\n }\n p++;\n }\n break;\n }\n p++;\n }\n }\n return 0;\nreadbody:\n // read the content body\n if ((size_t)(e-p) < bodylen) {\n return 0;\n }\n const char *body = p;\n p = e;\n\n // check\n if (urilen == 0 || uri[0] != '/') {\n goto badreq;\n }\n uri++;\n urilen--;\n const char *ex = 0;\n size_t exlen = 0;\n const char *flags = 0;\n size_t flagslen = 0;\n const char *cas = 0;\n size_t caslen = 0;\n const char *qauth = 0;\n size_t qauthlen = 0;\n bool xx = false;\n bool nx = 
false;\n // Parse the query string, pulling the pairs along the way.\n size_t querylen = 0;\n const char *query = memchr(uri, '?', urilen);\n if (query) {\n querylen = urilen-(query-uri);\n urilen = query-uri;\n query++;\n querylen--;\n const char *qkey;\n size_t qkeylen;\n const char *qval;\n size_t qvallen;\n size_t j = 0;\n size_t k = 0;\n for (size_t i = 0; i < querylen; i++) {\n if (query[i] == '=') {\n k = i;\n i++;\n for (; i < querylen; i++) {\n if (query[i] == '&') {\n break;\n }\n }\n qval = query+k+1;\n qvallen = i-k-1;\n qkeyonly:\n qkey = query+j;\n qkeylen = k-j;\n // We have a new query pair (qkey, qval);\n if (bytes_const_eq(qkey, qkeylen, \"flags\")) {\n flags = qval;\n flagslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"ex\") || \n bytes_const_eq(qkey, qkeylen, \"ttl\"))\n {\n ex = qval;\n exlen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"cas\")) {\n cas = qval;\n caslen = qvallen;\n } else if (bytes_const_eq(qkey, qkeylen, \"xx\")) {\n xx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"nx\")) {\n nx = true;\n } else if (bytes_const_eq(qkey, qkeylen, \"auth\")) {\n qauth = qval;\n qauthlen = qvallen;\n }\n j = i+1;\n } else if (query[i] == '&' || i == querylen-1) {\n qval = 0;\n qvallen = 0;\n if (i == querylen-1) {\n i++;\n }\n k = i;\n goto qkeyonly;\n }\n }\n }\n // The entire HTTP request is complete.\n // Turn request into valid command arguments.\n if (bytes_const_eq(method, methodlen, \"GET\")) {\n if (urilen > 0 && uri[0] == '@') {\n // system command such as @stats or @flushall\n goto badreq;\n } else if (urilen == 0) {\n goto showhelp;\n } else {\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"get\", 3, true);\n args_append(args, uri, urilen, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"PUT\")) {\n if (nocontentlength) {\n // goto badreq;\n }\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n 
args_append(args, \"set\", 3, true);\n args_append(args, uri, urilen, true);\n args_append(args, body, bodylen, true);\n if (cas) {\n args_append(args, \"cas\", 3, true);\n args_append(args, cas, caslen, true);\n }\n if (ex) {\n args_append(args, \"ex\", 2, true);\n args_append(args, ex, exlen, true);\n }\n if (flags) {\n args_append(args, \"flags\", 5, true);\n args_append(args, flags, flagslen, true);\n }\n if (xx) {\n args_append(args, \"xx\", 2, true);\n }\n if (nx) {\n args_append(args, \"nx\", 2, true);\n }\n } else if (bytes_const_eq(method, methodlen, \"DELETE\")) {\n if (urilen > 0 && uri[0] == '@') {\n goto badreq;\n }\n if (!http_valid_key(uri, urilen)) {\n goto badkey;\n }\n args_append(args, \"del\", 3, true);\n args_append(args, uri, urilen, true);\n } else {\n parse_seterror(\"Method Not Allowed\");\n goto badreq;\n }\n\n // Check authorization\n const char *authval = 0;\n size_t authvallen = 0;\n if (qauthlen > 0) {\n authval = qauth;\n authvallen = qauthlen;\n } else if (authhdrlen > 0) {\n if (authhdrlen >= 7 && strncmp(authhdr, \"Bearer \", 7) == 0) {\n authval = authhdr + 7;\n authvallen = authhdrlen - 7;\n } else {\n goto unauthorized;\n }\n }\n if (useauth || authvallen > 0) {\n stat_auth_cmds_incr(0);\n size_t authlen = strlen(auth);\n if (authvallen != authlen || memcmp(auth, authval, authlen) != 0) {\n stat_auth_errors_incr(0);\n goto unauthorized;\n }\n\n }\n return e-data;\nbadreq:\n parse_seterror(\"Bad Request\");\n return -1;\nbadproto:\n parse_seterror(\"Bad Request\");\n return -1;\nbadkey:\n parse_seterror(\"Invalid Key\");\n return -1;\nunauthorized:\n parse_seterror(\"Unauthorized\");\n return -1;\nshowhelp:\n if (html) {\n parse_seterror(\"Show Help HTML\");\n } else {\n parse_seterror(\"Show Help TEXT\");\n }\n return -1;\n}\n"], ["/pogocache/src/conn.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit conn.c are interface functions for a network connection.\n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"args.h\"\n#include \"cmds.h\"\n#include \"xmalloc.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"helppage.h\"\n\n#define MAXPACKETSZ 1048576 // Maximum read packet size\n\nstruct conn {\n struct net_conn *conn5; // originating connection\n struct buf packet; // current incoming packet\n int proto; // connection protocol (memcache, http, etc)\n bool auth; // user is authorized\n bool noreply; // only for memcache\n bool keepalive; // only for http\n int httpvers; // only for http\n struct args args; // command args, if any\n struct pg *pg; // postgres context, only if proto is postgres\n};\n\nbool conn_istls(struct conn *conn) {\n return net_conn_istls(conn->conn5);\n}\n\nint conn_proto(struct conn *conn) {\n return conn->proto;\n}\n\nbool conn_auth(struct conn *conn) {\n return conn->auth;\n}\n\nvoid conn_setauth(struct conn *conn, bool ok) {\n conn->auth = ok;\n}\n\nbool conn_isclosed(struct conn *conn) {\n return net_conn_isclosed(conn->conn5);\n}\n\nvoid conn_close(struct conn *conn) {\n net_conn_close(conn->conn5);\n}\n\nvoid evopened(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = xmalloc(sizeof(struct conn));\n memset(conn, 0, sizeof(struct conn));\n conn->conn5 = conn5;\n net_conn_setudata(conn5, conn);\n}\n\nvoid evclosed(struct net_conn *conn5, void *udata) {\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n buf_clear(&conn->packet);\n args_free(&conn->args);\n pg_free(conn->pg);\n xfree(conn);\n}\n\n// network data handler\n// The evlen may be zero when returning from a bgwork 
routine, while having\n// existing data in the connection packet.\nvoid evdata(struct net_conn *conn5, const void *evdata, size_t evlen,\n void *udata)\n{\n (void)udata;\n struct conn *conn = net_conn_udata(conn5);\n if (conn_isclosed(conn)) {\n goto close;\n }\n#ifdef DATASETOK\n if (evlen == 14 && memcmp(evdata, \"*1\\r\\n$4\\r\\nPING\\r\\n\", 14) == 0) {\n conn_write_raw(conn, \"+PONG\\r\\n\", 7);\n } else if (evlen == 13 && memcmp(evdata, \"*2\\r\\n$3\\r\\nGET\\r\\n\", 13) == 0) {\n conn_write_raw(conn, \"$1\\r\\nx\\r\\n\", 7);\n } else {\n conn_write_raw(conn, \"+OK\\r\\n\", 5);\n }\n return;\n#endif\n char *data;\n size_t len;\n bool copied;\n if (conn->packet.len == 0) {\n data = (char*)evdata;\n len = evlen;\n copied = false;\n } else {\n buf_append(&conn->packet, evdata, evlen);\n len = conn->packet.len;\n data = conn->packet.data;\n copied = true;\n }\n while (len > 0 && !conn_isclosed(conn)) {\n // Parse the command\n ssize_t n = parse_command(data, len, &conn->args, &conn->proto, \n &conn->noreply, &conn->httpvers, &conn->keepalive, &conn->pg);\n if (n == 0) {\n // Not enough data provided yet.\n break;\n } else if (n == -1) {\n // Protocol error occurred.\n conn_write_error(conn, parse_lasterror());\n if (conn->proto == PROTO_MEMCACHE) {\n // Memcache doesn't close, but we'll need to know the last\n // character position to continue and revert back to it so\n // we can attempt to continue to the next command.\n n = parse_lastmc_n();\n } else {\n // Close on protocol error\n conn_close(conn);\n break;\n }\n } else if (conn->args.len == 0) {\n // There were no command arguments provided.\n if (conn->proto == PROTO_POSTGRES) {\n if (!pg_respond(conn, conn->pg)) {\n // close connection\n conn_close(conn);\n break;\n }\n } else if (conn->proto == PROTO_MEMCACHE) {\n // Memcache simply returns a nondescript error.\n conn_write_error(conn, \"ERROR\");\n } else if (conn->proto == PROTO_HTTP) {\n // HTTP must always return arguments.\n 
assert(!\"PROTO_HTTP\");\n } else if (conn->proto == PROTO_RESP) {\n // RESP just continues until it gets args.\n }\n } else if (conn->proto == PROTO_POSTGRES && !conn->pg->ready) {\n // This should not have been reached. The client did not \n // send a startup message\n conn_close(conn);\n break;\n } else if (conn->proto != PROTO_POSTGRES || \n pg_precommand(conn, &conn->args, conn->pg))\n {\n evcommand(conn, &conn->args);\n }\n len -= n;\n data += n;\n if (net_conn_bgworking(conn->conn5)) {\n // BGWORK(0)\n break;\n }\n if (conn->proto == PROTO_HTTP) {\n conn_close(conn);\n }\n }\n if (conn_isclosed(conn)) {\n goto close;\n }\n if (len == 0) {\n if (copied) {\n if (conn->packet.cap > MAXPACKETSZ) {\n buf_clear(&conn->packet);\n }\n conn->packet.len = 0;\n }\n } else {\n if (copied) {\n memmove(conn->packet.data, data, len);\n conn->packet.len = len;\n } else {\n buf_append(&conn->packet, data, len);\n }\n }\n return;\nclose:\n conn_close(conn);\n}\n\nstruct bgworkctx {\n struct conn *conn;\n void *udata;\n void(*work)(void *udata);\n void(*done)(struct conn *conn, void *udata);\n};\n\nstatic void work5(void *udata) {\n struct bgworkctx *ctx = udata;\n ctx->work(ctx->udata);\n}\n\nstatic void done5(struct net_conn *conn, void *udata) {\n (void)conn;\n struct bgworkctx *ctx = udata;\n ctx->done(ctx->conn, ctx->udata);\n xfree(ctx);\n}\n\n// conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool conn_bgwork(struct conn *conn, void(*work)(void *udata), \n void(*done)(struct conn *conn, void *udata), void *udata)\n{\n struct bgworkctx *ctx = xmalloc(sizeof(struct bgworkctx));\n ctx->conn = conn;\n ctx->udata = udata;\n ctx->work = work;\n ctx->done = done;\n if (!net_conn_bgwork(conn->conn5, work5, done5, ctx)) {\n xfree(ctx);\n return false;\n }\n return true;\n}\n\nstatic void writeln(struct conn *conn, char ch, const void *data, ssize_t len) {\n if 
(len < 0) {\n len = strlen(data);\n }\n net_conn_out_ensure(conn->conn5, 3+len);\n net_conn_out_write_byte_nocheck(conn->conn5, ch);\n size_t mark = net_conn_out_len(conn->conn5);\n net_conn_out_write_nocheck(conn->conn5, data, len);\n net_conn_out_write_byte_nocheck(conn->conn5, '\\r');\n net_conn_out_write_byte_nocheck(conn->conn5, '\\n');\n uint8_t *out = (uint8_t*)net_conn_out(conn->conn5);\n for (ssize_t i = mark; i < len; i++) {\n if (out[i] < ' ') {\n out[i] = ' ';\n }\n }\n}\n\nstatic void write_error(struct conn *conn, const char *err, bool server) {\n if (conn->proto == PROTO_MEMCACHE) {\n if (strstr(err, \"ERR \") == err) {\n // convert to client or server error\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n if (server) {\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err+4); \n } else {\n snprintf(err2, err2sz, \"CLIENT_ERROR %s\\r\\n\", err+4); \n }\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n if (server) {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else if (strstr(err, \"CLIENT_ERROR \") == err || \n strstr(err, \"CLIENT_ERROR \") == err)\n {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"%s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n conn_write_raw(conn, \"ERROR\\r\\n\", 7);\n }\n }\n } else if (conn->proto == PROTO_POSTGRES) {\n if (strstr(err, \"ERR \") == err) {\n err = err+4;\n }\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n } else if (conn->proto == PROTO_HTTP) {\n if (strstr(err, \"ERR \") == err) {\n err += 4;\n }\n if (strcmp(err, \"Show Help HTML\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_HTML, -1);\n } else if (strcmp(err, \"Show Help TEXT\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_TEXT, -1);\n } else if (strcmp(err, 
\"Method Not Allowed\") == 0) {\n conn_write_http(conn, 405, \"Method Not Allowed\", \n \"Method Not Allowed\\r\\n\", -1);\n } else if (strcmp(err, \"Unauthorized\") == 0) {\n conn_write_http(conn, 401, \"Unauthorized\", \n \"Unauthorized\\r\\n\", -1);\n } else if (strcmp(err, \"Bad Request\") == 0) {\n conn_write_http(conn, 400, \"Bad Request\", \n \"Bad Request\\r\\n\", -1);\n } else {\n size_t sz = strlen(err)+32;\n char *err2 = xmalloc(sz);\n snprintf(err2, sz, \"ERR %s\\r\\n\", err);\n conn_write_http(conn, 500, \"Internal Server Error\", \n err2, -1);\n xfree(err2);\n }\n } else {\n writeln(conn, '-', err, -1);\n }\n}\n\nvoid conn_write_error(struct conn *conn, const char *err) {\n bool server = false;\n if (strcmp(err, ERR_OUT_OF_MEMORY) == 0) {\n server = true;\n }\n write_error(conn, err, server);\n}\n\nvoid conn_write_string(struct conn *conn, const char *cstr) {\n writeln(conn, '+', cstr, -1);\n}\n\nvoid conn_write_null(struct conn *conn) {\n net_conn_out_write(conn->conn5, \"$-1\\r\\n\", 5);\n}\n\nvoid resp_write_bulk(struct buf *buf, const void *data, size_t len) {\n uint8_t str[32];\n size_t n = u64toa(len, str);\n buf_append_byte(buf, '$');\n buf_append(buf, str, n);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n buf_append(buf, data, len);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n}\n\nvoid conn_write_bulk(struct conn *conn, const void *data, size_t len) {\n net_conn_out_ensure(conn->conn5, 32+len);\n size_t olen = net_conn_out_len(conn->conn5);\n uint8_t *base = (uint8_t*)net_conn_out(conn->conn5)+olen;\n uint8_t *p = base;\n *(p++) = '$';\n p += u64toa(len, p);\n *(p++) = '\\r';\n *(p++) = '\\n';\n memcpy(p, data, len);\n p += len;\n *(p++) = '\\r';\n *(p++) = '\\n';\n net_conn_out_setlen(conn->conn5, olen + (p-base));\n}\n\nvoid conn_write_raw(struct conn *conn, const void *data, size_t len) {\n net_conn_out_write(conn->conn5, data, len);\n}\n\nvoid conn_write_http(struct conn *conn, int code, const char 
*status,\n const void *body, ssize_t bodylen)\n{\n if (bodylen == -1) {\n if (!body) {\n body = status;\n }\n bodylen = strlen(body);\n }\n char resp[512];\n size_t n = snprintf(resp, sizeof(resp), \n \"HTTP/1.1 %d %s\\r\\n\"\n \"Content-Length: %zu\\r\\n\"\n \"Connection: Close\\r\\n\"\n \"\\r\\n\",\n code, status, bodylen);\n conn_write_raw(conn, resp, n);\n if (bodylen > 0) {\n conn_write_raw(conn, body, bodylen);\n }\n}\n\nvoid conn_write_array(struct conn *conn, size_t count) {\n uint8_t str[24];\n size_t n = u64toa(count, str);\n writeln(conn, '*', str, n);\n}\n\nvoid conn_write_uint(struct conn *conn, uint64_t value) {\n uint8_t buf[24];\n size_t n = u64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, '+', buf, n); // the '+' is needed for unsigned int\n }\n}\n\nvoid conn_write_int(struct conn *conn, int64_t value) {\n uint8_t buf[24];\n size_t n = i64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, ':', buf, n);\n }\n}\n\nvoid conn_write_raw_cstr(struct conn *conn, const char *cstr) {\n conn_write_raw(conn, cstr, strlen(cstr));\n}\n\nvoid conn_write_bulk_cstr(struct conn *conn, const char *cstr) {\n conn_write_bulk(conn, cstr, strlen(cstr));\n}\n\nvoid stat_cmd_get_incr(struct conn *conn) {\n net_stat_cmd_get_incr(conn->conn5);\n}\n\nvoid stat_cmd_set_incr(struct conn *conn) {\n net_stat_cmd_set_incr(conn->conn5);\n}\n\nvoid stat_get_hits_incr(struct conn *conn) {\n net_stat_get_hits_incr(conn->conn5);\n}\n\nvoid stat_get_misses_incr(struct conn *conn) {\n net_stat_get_misses_incr(conn->conn5);\n}\n\nbool pg_execute(struct conn *conn) {\n return conn->pg->execute;\n}\n\nstruct pg *conn_pg(struct conn *conn) {\n return conn->pg;\n}\n"], ["/pogocache/src/resp.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit resp.c provides the parser for the RESP wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args) {\n char *err = NULL;\n struct buf arg = { 0 };\n bool inarg = false;\n char quote = '\\0';\n for (size_t i = 0; i < len; i++) {\n char ch = bytes[i];\n if (inarg) {\n if (quote) {\n if (ch == '\\n') {\n goto fail_quotes;\n }\n if (ch == quote) { \n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n inarg = false;\n if (ch == '\\n') {\n i--;\n continue;\n }\n if (!isspace(ch)) {\n goto fail_quotes;\n }\n continue;\n } else if (ch == '\\\\') {\n i++;\n if (i == len) {\n break;\n }\n ch = bytes[i];\n switch (ch) {\n case 'n': ch = '\\n'; break;\n case 'r': ch = '\\r'; break;\n case 't': ch = '\\t'; break;\n }\n }\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n } else {\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else if (isspace(ch)) {\n args_append(args, arg.data, arg.len, false);\n if (args->len > MAXARGS) {\n goto fail_nargs;\n }\n arg.len = 0;\n if (ch == '\\n') {\n break;\n }\n inarg = false;\n } else {\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n } else {\n if (ch == '\\n') {\n 
buf_clear(&arg);\n return i+1;\n }\n if (isspace(ch)) {\n continue;\n }\n inarg = true;\n if (ch == '\"' || ch == '\\'') {\n quote = ch;\n } else {\n quote = 0;\n buf_append_byte(&arg, ch);\n if (arg.len > MAXARGSZ) {\n stat_store_too_large_incr(0);\n goto fail_argsz;\n }\n }\n }\n }\n buf_clear(&arg);\n return 0;\nfail_quotes:\n if (!err) err = \"ERR Protocol error: unbalanced quotes in request\";\nfail_nargs:\n if (!err) err = \"ERR Protocol error: invalid multibulk length\";\nfail_argsz:\n if (!err) err = \"ERR Protocol error: invalid bulk length\";\n/* fail: */\n if (err) {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \"%s\", err);\n }\n buf_clear(&arg);\n return -1;\n}\n\nstatic int64_t read_num(const char *data, size_t len, int64_t min, int64_t max,\n bool *ok)\n{\n errno = 0;\n char *end;\n int64_t x = strtoll(data, &end, 10);\n *ok = errno == 0 && (size_t)(end-data) == len && x >= min && x <= max;\n return x;\n}\n\n#define read_resp_num(var, min, max, errmsg) { \\\n char *p = memchr(bytes, '\\r', end-bytes); \\\n if (!p) { \\\n if (end-bytes > 32) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n return 0; \\\n } \\\n if (p+1 == end) { \\\n return 0; \\\n } \\\n if (*(p+1) != '\\n') { \\\n return -1; \\\n } \\\n bool ok; \\\n var = read_num(bytes, p-bytes, min, max, &ok); \\\n if (!ok) { \\\n parse_seterror(\"ERR Protocol error: \" errmsg); \\\n return -1; \\\n } \\\n bytes = p+2; \\\n}\n\n// returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\nssize_t parse_resp(const char *bytes, size_t len, struct args *args) {\n const char *start = bytes;\n const char *end = bytes+len;\n if (bytes == end) {\n return 0;\n }\n if (*(bytes++) != '*') {\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nargs;\n read_resp_num(nargs, LONG_MIN, MAXARGS, \"invalid multibulk length\");\n for (int j = 0; j < nargs; j++) {\n if (bytes == end) {\n 
return 0;\n }\n if (*(bytes++) != '$') {\n snprintf(parse_lasterr, sizeof(parse_lasterr), \n \"ERR Protocol error: expected '$', got '%c'\", *(bytes-1));\n return -1;\n }\n if (bytes == end) {\n return 0;\n }\n int64_t nbytes;\n read_resp_num(nbytes, 0, MAXARGSZ, \"invalid bulk length\");\n if (nbytes+2 > end-bytes) {\n return 0;\n }\n args_append(args, bytes, nbytes, true);\n bytes += nbytes+2;\n }\n return bytes-start;\n}\n\n"], ["/pogocache/src/util.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit util.c provides various utilities and convenience functions.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n\n// Performs a case-insenstive equality test between the byte slice 'data' and\n// a c-string. It's expected that c-string is already lowercase and \n// null-terminated. 
The data does not need to be null-terminated.\nbool argeq_bytes(const void *data, size_t datalen, const char *cstr) {\n const char *p = data;\n const char *e = p+datalen;\n bool eq = true;\n while (eq && p < e && *cstr) {\n eq = tolower(*p) == *cstr;\n p++;\n cstr++;\n }\n return eq && *cstr == '\\0' && p == e;\n}\n\nbool argeq(struct args *args, int idx, const char *cstr) {\n return argeq_bytes(args->bufs[idx].data, args->bufs[idx].len, cstr);\n}\n\n// Safely adds two int64_t values and with clamping on overflow.\nint64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n// Safely multiplies two int64_t values and with clamping on overflow.\nint64_t int64_mul_clamp(int64_t a, int64_t b) {\n if (a || b) {\n if (a > 0) {\n if (b > 0 && a > INT64_MAX / b) {\n return INT64_MAX;\n } else if (b < 0 && b < INT64_MIN / a) {\n return INT64_MIN;\n }\n } else {\n if (b > 0 && a < INT64_MIN / b) {\n return INT64_MIN;\n } else if (b < 0 && a < INT64_MAX / b) {\n return INT64_MAX;\n }\n }\n }\n return a * b;\n}\n\n/// https://github.com/tidwall/varint.c\nint varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nint varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\nint varint_write_i64(void *data, int64_t x) {\n uint64_t ux = (uint64_t)x << 1;\n ux = x < 0 ? 
~ux : ux;\n return varint_write_u64(data, ux);\n}\n\nint varint_read_i64(const void *data, size_t len, int64_t *x) {\n uint64_t ux;\n int n = varint_read_u64(data, len, &ux);\n *x = (int64_t)(ux >> 1);\n *x = ux&1 ? ~*x : *x;\n return n;\n}\n\n\nconst char *memstr(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0fB\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fK\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fM\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1fG\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0G\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0M\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0K\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\nconst char *memstr_long(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0f bytes\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f KB\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f MB\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1f GB\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0 GB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 MB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 KB\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nuint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\nuint64_t rand_next(uint64_t *seed) {\n // pcg + mix13\n *seed = (*seed * UINT64_C(6364136223846793005)) + 1;\n return mix13(*seed);\n}\n\nvoid write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = 
(x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n bytes[4] = (x>>32)&0xFF;\n bytes[5] = (x>>40)&0xFF;\n bytes[6] = (x>>48)&0xFF;\n bytes[7] = (x>>56)&0xFF;\n}\n\nuint64_t read_u64(const void *data) {\n const uint8_t *bytes = data;\n uint64_t x = 0;\n x |= ((uint64_t)bytes[0])<<0;\n x |= ((uint64_t)bytes[1])<<8;\n x |= ((uint64_t)bytes[2])<<16;\n x |= ((uint64_t)bytes[3])<<24;\n x |= ((uint64_t)bytes[4])<<32;\n x |= ((uint64_t)bytes[5])<<40;\n x |= ((uint64_t)bytes[6])<<48;\n x |= ((uint64_t)bytes[7])<<56;\n return x;\n}\n\nvoid write_u32(void *data, uint32_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n}\n\nuint32_t read_u32(const void *data) {\n const uint8_t *bytes = data;\n uint32_t x = 0;\n x |= ((uint32_t)bytes[0])<<0;\n x |= ((uint32_t)bytes[1])<<8;\n x |= ((uint32_t)bytes[2])<<16;\n x |= ((uint32_t)bytes[3])<<24;\n return x;\n}\n\n// https://www.w3.org/TR/2003/REC-PNG-20031110/#D-CRCAppendix\nuint32_t crc32(const void *data, size_t len) {\n static __thread uint32_t table[256];\n static __thread bool computed = false;\n if (!computed) {\n for (uint32_t n = 0; n < 256; n++) {\n uint32_t c = n;\n for (int k = 0; k < 8; k++) {\n c = (c&1)?0xedb88320L^(c>>1):c>>1;\n }\n table[n] = c;\n }\n computed = true;\n }\n uint32_t crc = ~0;\n const uint8_t *buf = data;\n for (size_t n = 0; n < len; n++) {\n crc = table[(crc^buf[n])&0xff]^(crc>>8);\n }\n return ~crc;\n}\n\n// Attempts to read exactly len bytes from file stream\n// Returns the number of bytes read. 
Anything less than len means the stream\n// was closed or an error occured while reading.\n// Return -1 if no bytes were read and there was an error.\nssize_t read_full(int fd, void *data, size_t len) {\n uint8_t *bytes = data;\n size_t total = 0;\n while (len > 0) {\n ssize_t n = read(fd, bytes+total, len);\n if (n <= 0) {\n if (total > 0) {\n break;\n }\n return n;\n }\n len -= n;\n total += n;\n }\n return total;\n}\n\nsize_t u64toa(uint64_t x, uint8_t *data) {\n if (x < 10) {\n data[0] = '0'+x;\n return 1;\n }\n size_t i = 0;\n do {\n data[i++] = '0' + x % 10;\n } while ((x /= 10) > 0);\n // reverse the characters\n for (size_t j = 0, k = i-1; j < k; j++, k--) {\n uint8_t ch = data[j];\n data[j] = data[k];\n data[k] = ch;\n }\n return i;\n}\n\nsize_t i64toa(int64_t x, uint8_t *data) {\n if (x < 0) {\n data[0] = '-';\n data++;\n return u64toa(x * -1, data) + 1;\n }\n return u64toa(x, data);\n}\n\nuint32_t fnv1a_case(const char* buf, size_t len) {\n uint32_t hash = 0x811c9dc5;\n for (size_t i = 0; i < len; i++) {\n hash = (hash ^ tolower(buf[i])) * 0x01000193;\n }\n\treturn hash;\n}\n\nbool parse_i64(const char *data, size_t len, int64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n errno = 0;\n char *end;\n *x = strtoll(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool parse_u64(const char *data, size_t len, uint64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n if (buf[0] == '-') {\n return false;\n }\n errno = 0;\n char *end;\n *x = strtoull(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool argi64(struct args *args, int idx, int64_t *x) {\n return parse_i64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nbool argu64(struct args *args, int idx, uint64_t *x) {\n return parse_u64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nvoid *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t 
uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\nvoid store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// Increment a morris counter. The counter is clipped to 31 bits\nuint8_t morris_incr(uint8_t morris, uint64_t rand) {\n return morris>=31?31:morris+!(rand&((UINT64_C(1)< '~') {\n printf(\"\\\\x%02x\", c);\n } else {\n printf(\"%c\", c);\n }\n }\n}\n"], ["/pogocache/src/pogocache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit pogocache.c is the primary caching engine library, which is designed\n// to be standalone and embeddable.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"pogocache.h\"\n\n#define MINLOADFACTOR_RH 55 // 55%\n#define MAXLOADFACTOR_RH 95 // 95%\n#define DEFLOADFACTOR 75 // 75%\n#define SHRINKAT 10 // 10%\n#define DEFSHARDS 4096 // default number of shards\n#define INITCAP 64 // intial number of buckets per shard\n\n// #define DBGCHECKENTRY\n// #define EVICTONITER\n// #define HALFSECONDTIME\n// #define NO48BITPTRS\n\n#if INTPTR_MAX == INT64_MAX\n#ifdef NO48BITPTRS\n#define PTRSIZE 8\n#else\n#define PTRSIZE 6\n#endif\n#elif INTPTR_MAX == INT32_MAX\n#define PTRSIZE 4\n#else\n#error Unknown pointer size\n#endif\n\nstatic struct pogocache_count_opts defcountopts = { 0 };\nstatic struct pogocache_total_opts deftotalopts = { 0 };\nstatic struct pogocache_size_opts defsizeopts = { 0 };\nstatic struct pogocache_sweep_opts defsweepopts = { 0 };\nstatic struct pogocache_clear_opts defclearopts = { 0 };\nstatic struct pogocache_store_opts defstoreopts = { 0 };\nstatic struct pogocache_load_opts defloadopts = { 0 };\nstatic struct pogocache_delete_opts defdeleteopts = { 0 };\nstatic struct pogocache_iter_opts defiteropts = { 0 };\nstatic struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// returns monotonic nanoseconds of the CPU clock.\nstatic int64_t gettime(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif 
defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// returns offset of system clock since first call in thread.\nstatic int64_t getnow(void) {\n return gettime();\n}\n\n// https://github.com/tidwall/th64\nstatic uint64_t th64(const void *data, size_t len, uint64_t seed) {\n uint8_t*p=(uint8_t*)data,*e=p+len;\n uint64_t r=0x14020a57acced8b7,x,h=seed;\n while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;\n while(p>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);\n}\n\n// Load a pointer from an unaligned memory.\nstatic void *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\n// Store a pointer into unaligned memory.\nstatic void store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nstatic uint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\n// Sixpack compression algorithm\n// 
- Converts a simple 8-bit string into 6-bit string.\n// - Intended to be used on small strings that only use characters commonly\n// used for keys in KV data stores.\n// - Allows the following 64 item character set:\n// -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy\n// Note that the characters \"QZz\" are not included.\n// - Sortable and comparable using memcmp.\nstatic char tosix[256] = {\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0-15\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, // 32-47\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, // 48-63\n 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79\n 29, 0, 30, 31, 32, 33, 34, 35, 36, 37, 0, 0, 0, 0, 0, 38, // 80-95\n 0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111\n 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 112-127\n};\n\nstatic char fromsix[] = {\n 0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\n 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',\n 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'\n};\n\n// 0: [000000..] bitpos: 0\n// 1: [00000011][1111....] bitpos: 6\n// 2: [00000011][11112222][22......] 
bitpos: 12 \n// 3: [00000011][11112222][22333333] bitpos: 18\n\n// Sixpack data\n// Fills the data in dst and returns the number of bytes filled.\n// Returns 0 if not a sixpackable.\n// The dst array must be large enough to hold packed value\nstatic int sixpack(const char *data, int len, char dst[]){\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n for (int i = 0; i < len; i++) {\n int k6v = tosix[bytes[i]];\n if (k6v == 0) {\n return 0;\n }\n if (i%4 == 0) {\n dst[j++] = k6v<<2;\n } else if (i%4 == 1) {\n dst[j-1] |= k6v>>4;\n dst[j++] = k6v<<4;\n } else if (i%4 == 2) {\n dst[j-1] |= k6v>>2;\n dst[j++] = k6v<<6;\n } else {\n dst[j-1] |= k6v;\n }\n }\n return j;\n}\n\n// (Un)sixpack data.\n// Fills the data in dst and returns the len of original data.\n// The data must be sixpacked and len must be > 0.\n// The dst array must be large enough to hold unpacked value\nstatic int unsixpack(const char *data, int len, char dst[]) {\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n int k = 0;\n for (int i = 0; i < len; i++) {\n if (k == 0) {\n dst[j++] = fromsix[bytes[i]>>2];\n k++;\n } else if (k == 1) {\n dst[j++] = fromsix[((bytes[i-1]<<4)|(bytes[i]>>4))&63];\n k++;\n } else {\n dst[j++] = fromsix[((bytes[i-1]<<2)|(bytes[i]>>6))&63];\n dst[j++] = fromsix[bytes[i]&63];\n k = 0;\n }\n }\n if (j > 0 && dst[j-1] == 0) {\n j--;\n }\n return j;\n}\n\n// Safely adds two int64_t values, clamping on overflow.\nstatic int64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n/// https://github.com/tidwall/varint.c\nstatic int varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = 
(uint8_t)x;\n return n;\n}\n\nstatic int varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\n#ifdef HALFSECONDTIME\ntypedef uint32_t etime_t;\n#else\ntypedef int64_t etime_t;\n#endif\n\n\n// Mostly a copy of the pogocache_opts, but used internally\n// See the opts_to_ctx function for translation.\nstruct pgctx {\n void *(*malloc)(size_t);\n void (*free)(void*);\n size_t (*malloc_size)(void*);\n void (*yield)(void *udata);\n void (*evicted)(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata);\n void *udata;\n bool usecas;\n bool nosixpack;\n bool noevict;\n bool allowshrink;\n bool usethreadbatch;\n int nshards;\n double loadfactor;\n double shrinkfactor;\n uint64_t seed;\n};\n\n// The entry structure is a simple allocation with all the fields, being \n// variable in size, slammed together contiguously. There's a one byte header\n// that provides information about what is available in the structure.\n// The format is: (header,time,expires?,flags?,cas?,key,value)\n// The expires, flags, and cas fields are optional. 
The optionality depends on\n// header bit flags.\nstruct entry;\n\n// Returns the sizeof the entry struct, which takes up no space at all.\n// This would be like doing a sizeof(struct entry), if entry had a structure.\nstatic size_t entry_struct_size(void) {\n return 0;\n}\n\n// Returns the data portion of the entry, which is the entire allocation.\nstatic const uint8_t *entry_data(const struct entry *entry) {\n return (uint8_t*)entry;\n}\n\nstatic int64_t entry_expires(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n int64_t x = 0;\n if ((hdr>>0)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\nstatic int64_t entry_time(struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n etime_t etime;\n memcpy(&etime, p+1, sizeof(etime_t));\n#ifdef HALFSECONDTIME\n int64_t time = (int64_t)etime * INT64_C(500000000);\n#else \n int64_t time = etime;\n#endif \n return time;\n}\n\nstatic void entry_settime(struct entry *entry, int64_t time) {\n const uint8_t *p = entry_data(entry);\n#ifdef HALFSECONDTIME\n // Eviction time is stored as half seconds.\n etime_t etime = time / INT64_C(500000000);\n etime = etime > UINT32_MAX ? UINT32_MAX : etime;\n#else\n etime_t etime = time;\n#endif\n memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));\n}\n\nstatic int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,\n int64_t cleartime)\n{\n return etime < cleartime ? POGOCACHE_REASON_CLEARED :\n expires > 0 && expires <= now ? 
POGOCACHE_REASON_EXPIRED :\n 0;\n}\n\nstatic int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {\n int64_t etime = entry_time(entry);\n int64_t expires = entry_expires(entry);\n return entry_alive_exp(expires, etime, now, cleartime);\n}\n\nstatic uint64_t entry_cas(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n uint64_t x = 0;\n if ((hdr>>2)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\n// returns the key. If using sixpack make sure to copy the result asap.\nstatic const char *entry_key(const struct entry *entry, size_t *keylen_out,\n char buf[128])\n{\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n if ((hdr>>3)&1) {\n keylen = unsixpack(key, (int)keylen, buf);\n key = buf;\n }\n *keylen_out = keylen;\n return key;\n}\n\n// returns the raw key. 
sixpack will be returned in it's raw format\nstatic const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n *keylen_out = keylen;\n return key;\n}\n\nstatic bool entry_sixpacked(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p);\n return (hdr>>3)&1;\n}\n\nstatic size_t entry_extract(const struct entry *entry, const char **key,\n size_t *keylen, char buf[128], const char **val, size_t *vallen, \n int64_t *expires, uint32_t *flags, uint64_t *cas,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n if (expires) {\n memcpy(expires, p, 8);\n }\n p += 8; // expires\n } else {\n if (expires) {\n *expires = 0;\n }\n }\n if ((hdr>>1)&1) {\n if (flags) {\n memcpy(flags, p, 4);\n }\n p += 4; // flags\n } else {\n if (flags) {\n *flags = 0;\n }\n }\n if (ctx->usecas) {\n if (cas) {\n memcpy(cas, p, 8);\n }\n p += 8; // cas\n } else {\n if (cas) {\n *cas = 0;\n }\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n if (key) {\n *key = (char*)p;\n *keylen = x;\n if ((hdr>>3)&1) {\n *keylen = unsixpack(*key, (int)*keylen, buf);\n *key = buf;\n }\n }\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n if (val) {\n *val = (char*)p;\n *vallen = x;\n }\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\nstatic size_t entry_memsize(const struct entry *entry,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p 
+= 4; // flags\n }\n if (ctx->usecas) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\n// The 'cas' param should always be set to zero unless loading from disk. \n// Setting to zero will set a new unique cas to the entry.\nstatic struct entry *entry_new(const char *key, size_t keylen, const char *val,\n size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,\n struct pgctx *ctx)\n{\n bool usesixpack = !ctx->nosixpack;\n#ifdef DBGCHECKENTRY\n // printf(\"entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, \"\n // \"expires=%\" PRId64 \", flags=%\" PRId32 \", cas=%\" PRIu64 \", \"\n // \"usesixpack=%d\\n\", (int)keylen, key, keylen, (int)vallen, key, vallen,\n // expires, flags, cas, usesixpack);\n int64_t oexpires = expires;\n uint32_t oflags = flags;\n uint64_t ocas = cas;\n const char *okey = key;\n size_t okeylen = keylen;\n const char *oval = val;\n size_t ovallen = vallen;\n#endif\n uint8_t hdr = 0;\n uint8_t keylenbuf[10];\n uint8_t vallenbuf[10];\n int nexplen, nflagslen, ncaslen, nkeylen, nvallen;\n if (expires > 0) {\n hdr |= 1;\n nexplen = 8;\n } else {\n nexplen = 0;\n }\n if (flags > 0) {\n hdr |= 2;\n nflagslen = 4;\n } else {\n nflagslen = 0;\n }\n if (ctx->usecas) {\n hdr |= 4;\n ncaslen = 8;\n } else {\n ncaslen = 0;\n }\n char buf[128];\n if (usesixpack && keylen <= 128) {\n size_t len = sixpack(key, keylen, buf);\n if (len > 0) {\n hdr |= 8;\n keylen = len;\n key = buf;\n }\n }\n nkeylen = varint_write_u64(keylenbuf, keylen);\n nvallen = varint_write_u64(vallenbuf, vallen);\n struct entry *entry_out = 0;\n size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+\n ncaslen+nkeylen+keylen+nvallen+vallen;\n // printf(\"malloc=%p size=%zu, ctx=%p\\n\", ctx->malloc, size, ctx);\n void *mem = ctx->malloc(size);\n struct entry *entry = mem;\n if (!entry) {\n return 
0;\n }\n uint8_t *p = (void*)entry_data(entry);\n *(p++) = hdr;\n memset(p, 0, sizeof(etime_t));\n p += sizeof(etime_t); // time\n if (nexplen > 0) {\n memcpy(p, &expires, nexplen);\n p += nexplen;\n }\n if (nflagslen > 0) {\n memcpy(p, &flags, nflagslen);\n p += nflagslen;\n }\n if (ncaslen > 0) {\n memcpy(p, &cas, ncaslen);\n p += ncaslen;\n }\n memcpy(p, keylenbuf, nkeylen);\n p += nkeylen;\n memcpy(p, key, keylen);\n p += keylen;\n memcpy(p, vallenbuf, nvallen);\n p += nvallen;\n memcpy(p, val, vallen);\n p += vallen;\n entry_out = entry;\n#ifdef DBGCHECKENTRY\n // check the key\n const char *key2, *val2;\n size_t keylen2, vallen2;\n int64_t expires2;\n uint32_t flags2;\n uint64_t cas2;\n char buf1[256];\n entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,\n &flags2, &cas2, ctx);\n assert(expires2 == oexpires);\n assert(flags2 == oflags);\n assert(cas2 == ocas);\n assert(keylen2 == okeylen);\n assert(memcmp(key2, okey, okeylen) == 0);\n assert(vallen2 == ovallen);\n assert(memcmp(val2, oval, ovallen) == 0);\n#endif\n return entry_out;\n}\n\nstatic void entry_free(struct entry *entry, struct pgctx *ctx) {\n ctx->free(entry);\n}\n\nstatic int entry_compare(const struct entry *a, const struct entry *b) {\n size_t akeylen, bkeylen;\n char buf1[256], buf2[256];\n const char *akey;\n const char *bkey;\n if (entry_sixpacked(a) == entry_sixpacked(b)) {\n akey = entry_rawkey(a, &akeylen);\n bkey = entry_rawkey(b, &bkeylen);\n } else {\n akey = entry_key(a, &akeylen, buf1);\n bkey = entry_key(b, &bkeylen, buf2);\n }\n size_t size = akeylen < bkeylen ? akeylen : bkeylen;\n int cmp = memcmp(akey, bkey, size);\n if (cmp == 0) {\n cmp = akeylen < bkeylen ? 
-1 : akeylen > bkeylen;\n }\n return cmp;\n}\n\n#ifndef HASHSIZE\n#define HASHSIZE 3\n#endif\n#if HASHSIZE < 1 || HASHSIZE > 4\n#error bad hash size\n#endif\n\nstruct bucket {\n uint8_t entry[PTRSIZE]; // 48-bit pointer\n uint8_t hash[HASHSIZE]; // 24-bit hash\n uint8_t dib; // distance to bucket\n};\n\nstatic_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, \"bad bucket size\");\n\nstruct map {\n int cap; // initial capacity\n int nbuckets; // number of buckets\n int count; // current entry count\n int mask; // bit mask for \n int growat;\n int shrinkat;\n struct bucket *buckets;\n uint64_t total; // current entry count\n size_t entsize; // memory size of all entries\n \n};\n\nstruct shard {\n atomic_uintptr_t lock; // spinlock (batch pointer)\n uint64_t cas; // compare and store value\n int64_t cleartime; // last clear time\n int clearcount; // number of items cleared\n struct map map; // robinhood hashmap\n // for batch linked list only\n struct shard *next;\n};\n\nstatic void lock_init(struct shard *shard) {\n atomic_init(&shard->lock, 0);\n}\n\nstruct batch {\n struct pogocache *cache; // associated cache.\n struct shard *shard; // first locked shard\n int64_t time; // timestamp\n};\n\nstruct pogocache {\n bool isbatch; \n union {\n struct pgctx ctx;\n struct batch batch;\n };\n struct shard shards[];\n};\n\nstatic struct entry *get_entry(struct bucket *bucket) {\n return load_ptr(bucket->entry);\n}\n\nstatic void set_entry(struct bucket *bucket, struct entry *entry) {\n store_ptr(bucket->entry, entry);\n}\n\n#if HASHSIZE == 1\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFF;\n}\nstatic void write_hash(uint8_t data[1], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[1]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n return hash;\n}\n#elif HASHSIZE == 2\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFF;\n}\nstatic void write_hash(uint8_t data[2], uint32_t hash) {\n data[0] = 
(hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[2]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n return hash;\n}\n#elif HASHSIZE == 3\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFFFF;\n}\nstatic void write_hash(uint8_t data[3], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[3]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n return hash;\n}\n#else \nstatic uint32_t clip_hash(uint32_t hash) {\n return hash;\n}\nstatic void write_hash(uint8_t data[4], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n data[3] = (hash>>24)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[4]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n hash |= ((uint64_t)data[3])<<24;\n return hash;\n}\n#endif\n\nstatic uint32_t get_hash(struct bucket *bucket) {\n return read_hash(bucket->hash);\n}\n\nstatic void set_hash(struct bucket *bucket, uint32_t hash) {\n write_hash(bucket->hash, hash);\n}\n\nstatic uint8_t get_dib(struct bucket *bucket) {\n return bucket->dib;\n}\n\nstatic void set_dib(struct bucket *bucket, uint8_t dib) {\n bucket->dib = dib;\n}\n\nstatic bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {\n map->cap = cap;\n map->nbuckets = cap;\n map->count = 0;\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * ctx->loadfactor;\n map->shrinkat = map->nbuckets * ctx->shrinkfactor;\n size_t size = sizeof(struct bucket)*map->nbuckets;\n map->buckets = ctx->malloc(size);\n if (!map->buckets) {\n // nomem\n memset(map, 0, sizeof(struct map));\n return false;\n }\n memset(map->buckets, 0, size);\n return true;\n}\n\nstatic bool resize(struct map *map, 
size_t new_cap, struct pgctx *ctx) {\n struct map map2;\n if (!map_init(&map2, new_cap, ctx)) {\n return false;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket ebkt = map->buckets[i];\n if (get_dib(&ebkt)) {\n set_dib(&ebkt, 1);\n size_t j = get_hash(&ebkt) & map2.mask;\n while (1) {\n if (get_dib(&map2.buckets[j]) == 0) {\n map2.buckets[j] = ebkt;\n break;\n }\n if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {\n struct bucket tmp = map2.buckets[j];\n map2.buckets[j] = ebkt;\n ebkt = tmp;\n }\n j = (j + 1) & map2.mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n }\n }\n int org_cap = map->cap;\n int org_count = map->count;\n ctx->free(map->buckets);\n memcpy(map, &map2, sizeof(struct map));\n map->cap = org_cap;\n map->count = org_count;\n return true;\n}\n\nstatic bool map_insert(struct map *map, struct entry *entry, uint32_t hash,\n struct entry **old, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*2, ctx)) {\n *old = 0;\n return false;\n }\n }\n map->entsize += entry_memsize(entry, ctx);\n struct bucket ebkt;\n set_entry(&ebkt, entry);\n set_hash(&ebkt, hash);\n set_dib(&ebkt, 1);\n size_t i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n // new entry\n map->buckets[i] = ebkt;\n map->count++;\n map->total++;\n *old = 0;\n return true;\n }\n if (get_hash(&ebkt) == get_hash(&map->buckets[i]) && \n entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)\n {\n // replaced\n *old = get_entry(&map->buckets[i]);\n map->entsize -= entry_memsize(*old, ctx);\n set_entry(&map->buckets[i], get_entry(&ebkt));\n return true;\n }\n if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {\n struct bucket tmp = map->buckets[i];\n map->buckets[i] = ebkt;\n ebkt = tmp;\n }\n i = (i + 1) & map->mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n}\n\nstatic bool bucket_eq(struct map *map, size_t i, const char *key,\n size_t keylen, uint32_t hash)\n{\n if 
(get_hash(&map->buckets[i]) != hash) {\n return false;\n }\n size_t keylen2;\n char buf[128];\n const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);\n return keylen == keylen2 && memcmp(key, key2, keylen) == 0;\n}\n\n// Returns the bucket index for key, or -1 if not found.\nstatic int map_get_bucket(struct map *map, const char *key, size_t keylen,\n uint32_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while (1) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n return -1;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return i;\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic struct entry *map_get_entry(struct map *map, const char *key,\n size_t keylen, uint32_t hash, int *bkt_idx_out)\n{\n int i = map_get_bucket(map, key, keylen, hash);\n *bkt_idx_out = i;\n return i >= 0 ? get_entry(&map->buckets[i]) : 0;\n}\n\n// This deletes entry from bucket and adjusts the dibs buckets to right, if\n// needed.\nstatic void delbkt(struct map *map, size_t i) {\n set_dib(&map->buckets[i], 0);\n while (1) {\n size_t h = i;\n i = (i + 1) & map->mask;\n if (get_dib(&map->buckets[i]) <= 1) {\n set_dib(&map->buckets[h], 0);\n break;\n }\n map->buckets[h] = map->buckets[i];\n set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);\n }\n map->count--;\n}\n\nstatic bool needsshrink(struct map *map, struct pgctx *ctx) {\n return ctx->allowshrink && map->nbuckets > map->cap && \n map->count <= map->shrinkat;\n}\n\n// Try to shrink the hashmap. 
If needed, this will allocate a new hashmap that\n// has fewer buckets and move all existing entries into the smaller map.\n// The 'multi' param is a hint that multi entries may have been deleted, such\n// as with the iter or clear operations.\n// If the resize fails due to an allocation error then the existing hashmap\n// will be retained.\nstatic void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {\n if (!needsshrink(map, ctx)) {\n return;\n }\n int cap;\n if (multi) {\n // Determine how many buckets are needed to store all entries.\n cap = map->cap;\n int growat = cap * ctx->loadfactor;\n while (map->count >= growat) {\n cap *= 2;\n growat = cap * ctx->loadfactor;\n }\n } else {\n // Just half the buckets\n cap = map->nbuckets / 2;\n }\n resize(map, cap, ctx);\n}\n\n// delete an entry at bucket position. not called directly\nstatic struct entry *delentry_at_bkt(struct map *map, size_t i, \n struct pgctx *ctx)\n{\n struct entry *old = get_entry(&map->buckets[i]);\n assert(old);\n map->entsize -= entry_memsize(old, ctx);\n delbkt(map, i);\n return old;\n}\n\nstatic struct entry *map_delete(struct map *map, const char *key,\n size_t keylen, uint32_t hash, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n int i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n return 0;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return delentry_at_bkt(map, i, ctx);\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic size_t evict_entry(struct shard *shard, int shardidx, \n struct entry *entry, int64_t now, int reason, struct pgctx *ctx)\n{\n char buf[128];\n size_t keylen;\n const char *key = entry_key(entry, &keylen, buf);\n uint32_t hash = th64(key, keylen, ctx->seed);\n struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(del == entry); (void)del;\n if (ctx->evicted) {\n // Notify user that an entry was evicted.\n const char *val;\n size_t vallen;\n int64_t expires = 0;\n uint32_t flags = 0;\n uint64_t cas = 0;\n 
entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val,\n vallen, expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n size_t size = entry_memsize(entry, ctx);\n entry_free(entry, ctx);\n return size;\n}\n\n// evict an entry using the 2-random algorithm.\n// Pick two random entries and delete the one with the oldest access time.\n// Do not evict the entry if it matches the provided hash.\nstatic void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,\n int64_t now, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n struct map *map = &shard->map;\n struct entry *entries[2];\n int count = 0;\n for (int i = 1; i < map->nbuckets && count < 2; i++) {\n size_t j = (i+hash)&(map->nbuckets-1);\n struct bucket *bkt = &map->buckets[j];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry has expired. 
Evict this one instead.\n evict_entry(shard, shardidx, entry, now, reason, ctx);\n return;\n }\n if (get_hash(bkt) == hash) {\n continue;\n }\n entries[count++] = entry;\n }\n int choose;\n if (count == 1) {\n choose = 0;\n } else if (count == 2) {\n // We now have two candidates.\n if (entry_time(entries[0]) < entry_time(entries[1])) {\n choose = 0;\n } else {\n choose = 1;\n }\n } else {\n return;\n }\n evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,\n ctx);\n}\n\nstatic void shard_deinit(struct shard *shard, struct pgctx *ctx) {\n struct map *map = &shard->map;\n if (!map->buckets) {\n return;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n entry_free(entry, ctx);\n }\n ctx->free(map->buckets);\n}\n\nstatic bool shard_init(struct shard *shard, struct pgctx *ctx) {\n memset(shard, 0, sizeof(struct shard));\n lock_init(shard);\n shard->cas = 1;\n if (!map_init(&shard->map, INITCAP, ctx)) {\n // nomem\n shard_deinit(shard, ctx);\n return false;\n }\n return true;\n}\n\n/// Free all cache and shard hashmap allocations.\n/// This does not access the value data in any of the entries. If it is needed\n/// for the further cleanup at an entry value level, then use the\n/// pogocache_iter to perform the cleanup on each entry before calling this\n/// operation.\n/// Also this is not threadsafe. 
Make sure that other threads are not\n/// currently using the cache concurrently nor after this function is called.\nvoid pogocache_free(struct pogocache *cache) {\n if (!cache) {\n return;\n }\n struct pgctx *ctx = &cache->ctx;\n for (int i = 0; i < cache->ctx.nshards; i++) {\n shard_deinit(&cache->shards[i], ctx);\n }\n cache->ctx.free(cache);\n}\n\nstatic void opts_to_ctx(int nshards, struct pogocache_opts *opts,\n struct pgctx *ctx)\n{\n ctx->nshards = nshards;\n int loadfactor = 0;\n if (opts) {\n ctx->yield = opts->yield;\n ctx->evicted = opts->evicted;\n ctx->udata = opts->udata;\n ctx->usecas = opts->usecas;\n ctx->nosixpack = opts->nosixpack;\n ctx->noevict = opts->noevict;\n ctx->seed = opts->seed;\n loadfactor = opts->loadfactor;\n ctx->allowshrink = opts->allowshrink;\n ctx->usethreadbatch = opts->usethreadbatch;\n }\n // make loadfactor a floating point\n loadfactor = loadfactor == 0 ? DEFLOADFACTOR :\n loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :\n loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :\n loadfactor;\n ctx->loadfactor = ((double)loadfactor/100.0);\n ctx->shrinkfactor = ((double)SHRINKAT/100.0);\n}\n\nstatic struct pogocache_opts newdefopts = { 0 };\n\n/// Returns a new cache or null if there is not enough memory available.\n/// See 'pogocache_opts' for all options.\nstruct pogocache *pogocache_new(struct pogocache_opts *opts) {\n if (!opts) {\n opts = &newdefopts;\n }\n void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;\n void (*_free)(void*) = opts->free ? opts->free : free;\n int shards = !opts || opts->nshards <= 0 ? 
DEFSHARDS : opts->nshards;\n size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);\n struct pogocache *cache = _malloc(size);\n if (!cache) {\n return 0;\n }\n memset(cache, 0, sizeof(struct pogocache));\n struct pgctx *ctx = &cache->ctx;\n opts_to_ctx(shards, opts, ctx);\n ctx->malloc = _malloc;\n ctx->free = _free;\n for (int i = 0; i < ctx->nshards; i++) {\n if (!shard_init(&cache->shards[i], ctx)) {\n // nomem\n pogocache_free(cache);\n return 0;\n }\n }\n return cache;\n}\n\nstatic int shard_index(struct pogocache *cache, uint64_t hash) {\n return (hash>>32)%cache->ctx.nshards;\n}\n\nstatic struct shard *shard_get(struct pogocache *cache, int index) {\n return &cache->shards[index];\n}\n\n/// Returns a timestamp.\nint64_t pogocache_now(void) {\n return getnow();\n}\n\nstatic __thread struct pogocache thbatch;\n\nstruct pogocache *pogocache_begin(struct pogocache *cache) {\n struct pogocache *batch;\n if (cache->ctx.usethreadbatch) {\n batch = &thbatch;\n } else {\n batch = cache->ctx.malloc(sizeof(struct pogocache));\n if (!batch) {\n return 0;\n }\n }\n batch->isbatch = true;\n batch->batch.cache = cache;\n batch->batch.shard = 0;\n batch->batch.time = 0;\n return batch;\n}\n\nvoid pogocache_end(struct pogocache *batch) {\n assert(batch->isbatch);\n struct shard *shard = batch->batch.shard;\n while (shard) {\n struct shard *next = shard->next;\n shard->next = 0;\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);\n shard = next;\n }\n if (!batch->batch.cache->ctx.usethreadbatch) {\n batch->batch.cache->ctx.free(batch);\n }\n}\n\nstatic void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {\n if (batch) {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n shard->next = batch->shard;\n batch->shard = shard;\n break;\n }\n if (val == (uintptr_t)(void*)batch) {\n break;\n }\n if (ctx->yield) {\n 
ctx->yield(ctx->udata);\n }\n }\n } else {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n break;\n }\n if (ctx->yield) {\n ctx->yield(ctx->udata);\n }\n }\n }\n}\n\nstatic bool acquire_for_scan(int shardidx, struct shard **shard_out, \n struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *shard_out = shard;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// acquire a lock for the key\nstatic bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,\n struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n uint64_t fhash = th64(key, keylen, cache->ctx.seed);\n int shardidx = shard_index(cache, fhash);\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *hash_out = fhash;\n *shard_out = shard;\n *shardidx_out = shardidx;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// Acquire a lock on the shard for key and execute the provided operation.\n#define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \\\n int shardidx; \\\n uint32_t hash; \\\n struct shard *shard; \\\n bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \\\n &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)shardidx, (void)hash, (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\n// Acquire a lock on the shard at index and execute the 
provided operation.\n#define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \\\n struct shard *shard; \\\n bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\nstatic int loadop(const void *key, size_t keylen, \n struct pogocache_load_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defloadopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n // Get the entry bucket index for the entry with key.\n int bidx = map_get_bucket(&shard->map, key, keylen, hash);\n if (bidx == -1) {\n return POGOCACHE_NOTFOUND;\n }\n // Extract the bucket, entry, and values.\n struct bucket *bkt = &shard->map.buckets[bidx];\n struct entry *entry = get_entry(bkt);\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. 
Evict the entry and clear the bucket.\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(entry, ctx);\n delbkt(&shard->map, bidx);\n return POGOCACHE_NOTFOUND;\n }\n if (!opts->notouch) {\n entry_settime(entry, now);\n }\n if (opts->entry) {\n struct pogocache_update *update = 0;\n opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,\n cas, &update, opts->udata);\n if (update) {\n // User wants to update the entry.\n shard->cas++;\n struct entry *entry2 = entry_new(key, keylen, update->value,\n update->valuelen, update->expires, update->flags, shard->cas, \n ctx);\n if (!entry2) {\n return POGOCACHE_NOMEM;\n }\n entry_settime(entry2, now);\n set_entry(bkt, entry2);\n entry_free(entry, ctx);\n }\n }\n return POGOCACHE_FOUND;\n}\n\n/// Loads an entry from the cache.\n/// Use the pogocache_load_opts.entry callback to access the value of the entry.\n/// It's possible to update the value using the 'update' param in the callback.\n/// See 'pogocache_load_opts' for all options.\n/// @returns POGOCACHE_FOUND when the entry was found.\n/// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\nint pogocache_load(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_load_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen, \n loadop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int deleteop(const void *key, size_t keylen, \n struct pogocache_delete_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defdeleteopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);\n if (!entry) {\n // Entry does not exist\n return POGOCACHE_NOTFOUND;\n }\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. It was already deleted from the map but\n // we still need to notify the user.\n if (ctx->evicted) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (opts->entry) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen,\n expires, flags, cas, opts->udata))\n {\n // User canceled the delete. 
Put it back into the map.\n // This insert will not cause an allocation error because the \n // previous delete operation left us with at least one available\n // bucket.\n struct entry *old;\n bool ok = map_insert(&shard->map, entry, hash, &old, ctx);\n assert(ok); (void)ok;\n assert(!old);\n return POGOCACHE_CANCELED;\n }\n }\n // Entry was successfully deleted.\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_DELETED;\n}\n\n/// Deletes an entry from the cache.\n/// See 'pogocache_delete_opts' for all options.\n/// @returns POGOCACHE_DELETED when the entry was successfully deleted.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\n/// @returns POGOCACHE_CANCELED when opts.entry callback returned false.\nint pogocache_delete(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_delete_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n deleteop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int storeop(const void *key, size_t keylen, const void *val,\n size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,\n int shardidx, uint32_t hash, struct pgctx *ctx)\n{\n int count = shard->map.count;\n opts = opts ? opts : &defstoreopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int64_t expires = 0;\n if (opts->expires > 0) {\n expires = opts->expires;\n } else if (opts->ttl > 0) {\n expires = int64_add_clamp(now, opts->ttl);\n }\n if (opts->keepttl) {\n // User wants to keep the existing ttl. 
Get the existing entry from the\n // map first and take its expiration.\n int i;\n struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason == 0) {\n expires = entry_expires(old);\n }\n }\n }\n shard->cas++;\n struct entry *entry = entry_new(key, keylen, val, vallen, expires,\n opts->flags, shard->cas, ctx);\n if (!entry) {\n goto nomem;\n }\n entry_settime(entry, now);\n if (opts->lowmem && ctx->noevict) {\n goto nomem;\n }\n // Insert new entry into map\n struct entry *old;\n if (!map_insert(&shard->map, entry, hash, &old, ctx)) {\n goto nomem;\n }\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason) {\n // There's an old entry, but it's no longer alive.\n // Treat this like an eviction and notify the user.\n if (ctx->evicted) {\n const char *oval;\n size_t ovallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0,\n &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,\n oexpires, oflags, ocas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(old, ctx);\n old = 0;\n }\n }\n int put_back_status = 0;\n if (old) {\n if (opts->casop) {\n // User is requesting the cas operation.\n if (ctx->usecas) {\n uint64_t old_cas = entry_cas(old);\n if (opts->cas != old_cas) {\n // CAS test failed.\n // printf(\". cas failed: expected %\" PRIu64 \", \"\n // \"got %\" PRIu64 \"\\n\", cas, old_cas);\n put_back_status = POGOCACHE_FOUND;\n }\n } else {\n put_back_status = POGOCACHE_FOUND;\n }\n } else if (opts->nx) {\n put_back_status = POGOCACHE_FOUND;\n }\n if (put_back_status) {\n put_back:;\n // The entry needs be put back into the map and operation must\n // return early.\n // This insert operation must not fail since the entry 'e' and\n // 'old' both exist and will always be bucket swapped. 
There will\n // never be a new allocation.\n struct entry *e = 0;\n bool ok = map_insert(&shard->map, old, hash, &e, ctx);\n assert(ok); (void)ok;\n assert(e == entry);\n entry_free(entry, ctx);\n return put_back_status;\n }\n } else if (opts->xx || opts->casop) {\n // The new entry must not be inserted.\n // Delete it and return early.\n struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(e == entry); (void)e;\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (old && opts->entry) {\n // User is requesting to verify the old entry before allowing it to be\n // replaced by the new entry.\n const char *val;\n size_t vallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,\n oflags, ocas, opts->udata))\n {\n // User wants to keep the old entry.\n put_back_status = POGOCACHE_CANCELED;\n goto put_back;\n }\n }\n // The new entry was inserted.\n if (old) {\n entry_free(old, ctx);\n return POGOCACHE_REPLACED;\n } else {\n if (opts->lowmem && shard->map.count > count) {\n // The map grew by one bucket, yet the user indicates that there is\n // a low memory event. Evict one entry.\n auto_evict_entry(shard, shardidx, hash, now, ctx);\n }\n return POGOCACHE_INSERTED;\n }\nnomem:\n entry_free(entry, ctx);\n return POGOCACHE_NOMEM;\n}\n\n/// Insert or replace an entry in the cache.\n/// If an entry with the same key already exists then the cache then the \n/// the opts.entry callback can be used to check the existing\n/// value first, allowing the operation to be canceled.\n/// See 'pogocache_store_opts' for all options.\n/// @returns POGOCACHE_INSERTED when the entry was inserted.\n/// @returns POGOCACHE_REPLACED when the entry replaced an existing one.\n/// @returns POGOCACHE_FOUND when the entry already exists. 
(cas/nx)\n/// @returns POGOCACHE_CANCELED when the operation was canceled.\n/// @returns POGOCACHE_NOMEM when there is system memory available.\nint pogocache_store(struct pogocache *cache, const void *key, size_t keylen, \n const void *val, size_t vallen, struct pogocache_store_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)\n );\n}\n\n\nstatic struct pogocache *rootcache(struct pogocache *cache) {\n return cache->isbatch ? cache->batch.cache : cache;\n}\n\n/// Returns the number of shards in cache\nint pogocache_nshards(struct pogocache *cache) {\n cache = rootcache(cache);\n return cache->ctx.nshards;\n}\n\nstatic int iterop(struct shard *shard, int shardidx, int64_t now,\n struct pogocache_iter_opts *opts, struct pgctx *ctx)\n{\n char buf[128];\n int status = POGOCACHE_FINISHED;\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen,\n &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n#ifdef EVICTONITER\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n // Delete entry at bucket.\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n i--;\n#endif\n } else {\n // Entry is alive, check with user for next action.\n int action = POGOCACHE_ITER_CONTINUE;\n if (opts->entry) {\n action = opts->entry(shardidx, now, key, keylen, val,\n vallen, expires, flags, cas, opts->udata);\n }\n if (action != POGOCACHE_ITER_CONTINUE) {\n if (action&POGOCACHE_ITER_DELETE) {\n // Delete entry at bucket\n delbkt(&shard->map, 
i);\n entry_free(entry, ctx);\n i--;\n }\n if (action&POGOCACHE_ITER_STOP) {\n status = POGOCACHE_CANCELED;\n break;\n }\n }\n }\n }\n tryshrink(&shard->map, true, ctx);\n return status;\n}\n\n/// Iterate over entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The pogocache_iter_opts.entry callback can be used to perform actions such\n/// as: deleting entries and stopping iteration early. \n/// See 'pogocache_iter_opts' for all options.\n/// @return POGOCACHE_FINISHED if iteration completed\n/// @return POGOCACHE_CANCELED if iteration stopped early\nint pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defiteropts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return POGOCACHE_FINISHED;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)\n );\n }\n for (int i = 0; i < nshards; i++) {\n int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n iterop(shard, i, now, opts, &cache->ctx)\n );\n if (status != POGOCACHE_FINISHED) {\n return status;\n }\n }\n return POGOCACHE_FINISHED;\n}\n\nstatic size_t countop(struct shard *shard) {\n return shard->map.count - shard->clearcount;\n}\n\n/// Returns the number of entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_count(struct pogocache *cache,\n struct pogocache_count_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defcountopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n countop(shard);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n countop(shard);\n );\n }\n return count;\n}\n\nstatic uint64_t totalop(struct shard *shard) {\n return shard->map.total;\n}\n\n/// Returns the total number of entries that have ever been stored in the cache.\n/// For the current number of entries use pogocache_count().\n/// There's an option to allow for isolating the operation to a single shard.\nuint64_t pogocache_total(struct pogocache *cache,\n struct pogocache_total_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &deftotalopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,\n totalop(shard);\n );\n }\n uint64_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,\n totalop(shard);\n );\n }\n return count;\n}\n\nstatic size_t sizeop(struct shard *shard, bool entriesonly) {\n size_t size = 0;\n if (!entriesonly) {\n size += sizeof(struct shard);\n size += sizeof(struct bucket)*shard->map.nbuckets;\n }\n size += shard->map.entsize;\n return size;\n}\n\n/// Returns the total memory size of the shard.\n/// This includes the memory size of all data structures and entries.\n/// Use the entriesonly option to limit the result to only the entries.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_size(struct pogocache *cache,\n struct pogocache_size_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defsizeopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n sizeop(shard, opts->entriesonly);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n sizeop(shard, opts->entriesonly);\n );\n }\n return count;\n}\n\n\n\nstatic int sweepop(struct shard *shard, int shardidx, int64_t now,\n size_t *swept, size_t *kept, struct pgctx *ctx)\n{\n char buf[128];\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int64_t expires = entry_expires(entry);\n int64_t etime = entry_time(entry);\n int reason = entry_alive_exp(expires, etime, now, shard->cleartime);\n if (reason == 0) {\n // entry is still alive\n (*kept)++;\n continue;\n }\n // entry is no longer alive.\n if (ctx->evicted) {\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,\n &flags, &cas, ctx);\n // Report eviction to user\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n (*swept)++;\n // Entry was deleted from bucket, which may move entries to the right\n // over one bucket to the left. 
So we need to check the same bucket\n // again.\n i--;\n }\n tryshrink(&shard->map, true, ctx);\n return 0;\n}\n\n/// Remove expired entries from the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The final 'kept' or 'swept' counts are returned.\n/// @return POGOCACHE_FINISHED when iteration completed\n/// @return POGOCACHE_CANCELED when iteration stopped early\nvoid pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept, \n struct pogocache_sweep_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweepopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n size_t sweptc = 0;\n size_t keptc = 0;\n if (opts->oneshard) {\n if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,\n &cache->ctx);\n );\n }\n } else {\n for (int i = 0; i < nshards; i++) {\n size_t sweptc2 = 0;\n size_t keptc2 = 0;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);\n );\n sweptc += sweptc2;\n keptc += keptc2;\n }\n }\n if (swept) {\n *swept = sweptc;\n }\n if (kept) {\n *kept = keptc;\n }\n}\n\nstatic int clearop(struct shard *shard, int shardidx, int64_t now, \n struct pgctx *ctx)\n{\n (void)shardidx, (void)ctx;\n shard->cleartime = now;\n shard->clearcount += (shard->map.count-shard->clearcount);\n return 0;\n}\n\n/// Clear the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nvoid pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defclearopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return;\n }\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n clearop(shard, opts->oneshardidx, now, &cache->ctx);\n );\n return;\n }\n for (int i = 0; i < cache->ctx.nshards; i++) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n clearop(shard, i, now, &cache->ctx);\n );\n }\n}\n\nstatic int sweeppollop(struct shard *shard, int shardidx, int64_t now, \n int pollsize, double *percent)\n{\n // start at random bucket\n int count = 0;\n int dead = 0;\n int bidx = mix13(now+shardidx)%shard->map.nbuckets;\n for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {\n struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n count++;\n dead += (entry_alive(entry, now, shard->cleartime) != 0);\n }\n if (count == 0) {\n *percent = 0;\n return 0;\n }\n *percent = (double)dead/(double)count;\n return 0;\n}\n\ndouble pogocache_sweep_poll(struct pogocache *cache, \n struct pogocache_sweep_poll_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweeppollopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int pollsize = opts->pollsize == 0 ? 20 : opts->pollsize;\n \n // choose a random shard\n int shardidx = mix13(now)%nshards;\n double percent;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,\n sweeppollop(shard, shardidx, now, pollsize, &percent);\n );\n return percent;\n}\n"], ["/pogocache/src/parse.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit parse.c provides the entrypoint for parsing all data \n// for incoming client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n\n__thread char parse_lasterr[1024] = \"\";\n\nconst char *parse_lasterror(void) {\n return parse_lasterr;\n}\n\nssize_t parse_resp(const char *bytes, size_t len, struct args *args);\nssize_t parse_memcache(const char *data, size_t len, struct args *args,\n bool *noreply);\nssize_t parse_http(const char *data, size_t len, struct args *args,\n int *httpvers, bool *keepalive);\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args);\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pg);\n\nstatic bool sniff_proto(const char *data, size_t len, int *proto) {\n if (len > 0 && data[0] == '*') {\n *proto = PROTO_RESP;\n return true;\n }\n if (len > 0 && data[0] == '\\0') {\n *proto = PROTO_POSTGRES;\n return true;\n }\n // Parse the first line of text\n size_t n = 0;\n for (size_t i = 0; i < len; i++) {\n if (data[i] == '\\n') {\n n = i+1;\n break;\n }\n }\n // Look for \" HTTP/*.*\\r\\n\" suffix\n if (n >= 11 && memcmp(data+n-11, \" HTTP/\", 5) == 0 && \n data[n-4] == '.' 
&& data[n-2] == '\\r')\n {\n *proto = PROTO_HTTP;\n return true;\n }\n // Trim the prefix, Resp+Telnet and Memcache both allow for spaces between\n // arguments.\n while (*data == ' ') {\n data++;\n n--;\n len--;\n }\n // Treat all uppercase commands as Resp+Telnet\n if (n > 0 && data[0] >= 'A' && data[0] <= 'Z') {\n *proto = PROTO_RESP;\n return true;\n }\n // Look for Memcache commands\n if (n >= 1) {\n *proto = PROTO_MEMCACHE;\n return true;\n }\n // Protocol is unknown\n *proto = 0;\n return false;\n}\n\n// Returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\n// On success, the args and proto will be set to the command arguments and\n// protocol type, respectively.\n//\n// It's required to set proto to 0 for the first command, per client.\n// Then continue to provide the last known proto. \n// This allows for the parser to learn and predict the protocol for ambiguous\n// protocols; like Resp+Telnet, Memcache+Text, HTTP, etc.\n//\n// The noreply param is an output param that is only set when the proto is\n// memcache. The argument is stripped from the args array,\n// but made available to the caller in case it needs to be known.\n//\n// The keepalive param is an output param that is only set when the proto is\n// http. It's used to let the caller know to keep the connection alive for\n// another request.\nssize_t parse_command(const void *data, size_t len, struct args *args, \n int *proto, bool *noreply, int *httpvers, bool *keepalive, struct pg **pg)\n{\n args_clear(args);\n parse_lasterr[0] = '\\0';\n *httpvers = 0;\n *noreply = false;\n *keepalive = false;\n // Sniff for the protocol. 
This should only happen once per client, upon\n // their first request.\n if (*proto == 0) {\n if (!sniff_proto(data, len, proto)) {\n // Unknown protocol\n goto fail;\n }\n if (*proto == 0) {\n // Not enough data to determine yet\n return 0;\n }\n }\n if (*proto == PROTO_RESP) {\n const uint8_t *bytes = data;\n if (bytes[0] == '*') {\n return parse_resp(data, len, args);\n } else {\n return parse_resp_telnet(data, len, args);\n }\n } else if (*proto == PROTO_MEMCACHE) {\n return parse_memcache(data, len, args, noreply);\n } else if (*proto == PROTO_HTTP) {\n return parse_http(data, len, args, httpvers, keepalive);\n } else if (*proto == PROTO_POSTGRES) {\n return parse_postgres(data, len, args, pg);\n }\nfail:\n parse_seterror(\"ERROR\");\n return -1;\n}\n\n"], ["/pogocache/src/save.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit save.c provides an interface for saving and loading Pogocache\n// data files.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"pogocache.h\"\n#include \"buf.h\"\n#include \"util.h\"\n#include \"lz4.h\"\n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#define BLOCKSIZE 1048576\n#define COMPRESS\n\nextern struct pogocache *cache;\nextern const int verb;\n\nstruct savectx {\n pthread_t th; // work thread\n int index; // thread index\n pthread_mutex_t *lock; // write lock\n int fd; // work file descriptor\n int start; // current shard\n int count; // number of shards to process\n struct buf buf; // block buffer\n bool ok; // final ok\n int errnum; // final errno status\n struct buf dst; // compressed 
buffer space\n size_t nentries; // number of entried in block buffer\n};\n\nstatic int flush(struct savectx *ctx) {\n if (ctx->nentries == 0) {\n ctx->buf.len = 0;\n return 0;\n }\n // Make sure that there's enough space in the dst buffer to store the\n // header (16 bytes) and the compressed data.\n size_t bounds = LZ4_compressBound(ctx->buf.len);\n buf_ensure(&ctx->dst, 16+bounds);\n // Compress the block\n uint32_t len = LZ4_compress_default((char*)ctx->buf.data, \n (char*)ctx->dst.data+16, ctx->buf.len, bounds);\n // The block is now compressed.\n // Genreate a checksum of the compressed data.\n uint32_t crc = crc32(ctx->dst.data+16, len);\n // Write the 16 byte header\n // (0-3) 'POGO' tag\n memcpy(ctx->dst.data, \"POGO\", 4);\n // (4-7) Checksum\n write_u32(ctx->dst.data+4, crc);\n // (8-11) Len of decompressed data \n write_u32(ctx->dst.data+8, ctx->buf.len);\n // (12-15) Len of compressed data \n write_u32(ctx->dst.data+12, len);\n // The rest of the dst buffer contains the compressed bytes\n uint8_t *p = (uint8_t*)ctx->dst.data;\n uint8_t *end = p + len+16;\n bool ok = true;\n pthread_mutex_lock(ctx->lock);\n while (p < end) {\n ssize_t n = write(ctx->fd, p, end-p);\n if (n < 0) {\n ok = false;\n break;\n }\n p += n;\n }\n pthread_mutex_unlock(ctx->lock);\n ctx->buf.len = 0;\n ctx->nentries = 0;\n return ok ? 0 : -1;\n};\n\nstatic int save_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard;\n struct savectx *ctx = udata;\n buf_append_byte(&ctx->buf, 0); // entry type. 
zero=k/v string pair;\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n buf_append_uvarint(&ctx->buf, valuelen);\n buf_append(&ctx->buf, value, valuelen);\n if (expires > 0) {\n int64_t ttl = expires-time;\n assert(ttl > 0);\n buf_append_uvarint(&ctx->buf, ttl);\n } else {\n buf_append_uvarint(&ctx->buf, 0);\n }\n buf_append_uvarint(&ctx->buf, flags);\n buf_append_uvarint(&ctx->buf, cas);\n ctx->nentries++;\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void *thsave(void *arg) {\n struct savectx *ctx = arg;\n for (int i = 0; i < ctx->count; i++) {\n int shardidx = ctx->start+i;\n struct pogocache_iter_opts opts = {\n .oneshard = true,\n .oneshardidx = shardidx,\n .time = sys_now(),\n .entry = save_entry,\n .udata = ctx,\n };\n // write the unix timestamp before entries\n buf_append_uvarint(&ctx->buf, sys_unixnow());\n int status = pogocache_iter(cache, &opts);\n if (status == POGOCACHE_CANCELED) {\n goto done;\n }\n if (flush(ctx) == -1) {\n goto done;\n }\n }\n ctx->ok = true;\ndone:\n buf_clear(&ctx->buf);\n buf_clear(&ctx->dst);\n ctx->errnum = errno;\n return 0;\n}\n\nint save(const char *path, bool fast) {\n uint64_t seed = sys_seed();\n size_t psize = strlen(path)+32;\n char *workpath = xmalloc(psize);\n snprintf(workpath, psize, \"%s.%08x.pogocache.work\", path, \n (int)(seed%INT_MAX));\n if (verb > 1) {\n printf(\". 
Saving to work file %s\\n\", workpath);\n }\n int fd = open(workpath, O_RDWR|O_CREAT, S_IRUSR|S_IRGRP|S_IROTH);\n if (fd == -1) {\n return -1;\n }\n int nshards = pogocache_nshards(cache);\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n if (!fast) {\n nprocs = 1;\n }\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n struct savectx *ctxs = xmalloc(nprocs*sizeof(struct savectx));\n memset(ctxs, 0, nprocs*sizeof(struct savectx));\n bool ok = false;\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n ctx->index = i;\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->fd = fd;\n ctx->lock = &lock;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (nprocs > 1) {\n if (pthread_create(&ctx->th, 0, thsave, ctx) == -1) {\n ctx->th = 0;\n }\n }\n start += ctx->count;\n }\n // execute operations on failed threads (or fast=false)\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thsave(ctx);\n }\n }\n // wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n // check for any failures\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (!ctx->ok) {\n errno = ctx->errnum;\n goto done;\n }\n }\n // Move file work file to final path\n if (rename(workpath, path) == -1) {\n goto done;\n }\n ok = true;\ndone:\n close(fd);\n unlink(workpath);\n xfree(workpath);\n xfree(ctxs);\n return ok ? 
0 : -1;\n}\n\n// compressed block\nstruct cblock {\n struct buf cdata; // compressed data\n size_t dlen; // decompressed size\n};\n\nstruct loadctx {\n pthread_t th;\n\n // shared context\n pthread_mutex_t *lock;\n pthread_cond_t *cond;\n bool *donereading; // shared done flag\n int *nblocks; // number of blocks in queue\n struct cblock *blocks; // the block queue\n bool *failure; // a thread will set this upon error\n\n // thread status\n atomic_bool ok;\n int errnum;\n size_t ninserted;\n size_t nexpired;\n};\n\nstatic bool load_block(struct cblock *block, struct loadctx *ctx) {\n (void)ctx;\n bool ok = false;\n\n int64_t now = sys_now();\n int64_t unixnow = sys_unixnow();\n\n // decompress block\n char *ddata = xmalloc(block->dlen);\n int ret = LZ4_decompress_safe(block->cdata.data, ddata, block->cdata.len, \n block->dlen);\n if (ret < 0 || (size_t)ret != block->dlen) {\n printf(\". bad compressed block\\n\");\n goto done;\n }\n buf_clear(&block->cdata);\n uint8_t *p = (void*)ddata;\n uint8_t *e = p + block->dlen;\n\n int n;\n uint64_t x;\n // read unix time\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n printf(\". bad unix time\\n\");\n goto done;\n }\n p += n;\n\n int64_t unixtime = x;\n // printf(\". unixtime=%lld\\n\", unixtime);\n\n // Read each entry from decompressed data\n while (e > p) {\n /////////////////////\n // kind\n uint8_t kind = *(p++);\n \n if (kind != 0) {\n // only k/v strings allowed at this time.\n printf(\">> %d\\n\", kind);\n printf(\". 
unknown kind\\n\");\n goto done;\n }\n /////////////////////\n // key\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t keylen = x;\n if ((size_t)(e-p) < keylen) {\n goto done;\n }\n const uint8_t *key = p;\n p += keylen;\n /////////////////////\n // val\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t vallen = x;\n if ((size_t)(e-p) < vallen) {\n goto done;\n }\n const uint8_t *val = p;\n p += vallen;\n /////////////////////\n // ttl\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n goto done;\n }\n int64_t ttl = x;\n p += n;\n /////////////////////\n // flags\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > UINT32_MAX) {\n goto done;\n }\n uint32_t flags = x;\n p += n;\n /////////////////////\n // cas\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0) {\n goto done;\n }\n uint64_t cas = x;\n p += n;\n if (ttl > 0) {\n int64_t unixexpires = int64_add_clamp(unixtime, ttl);\n if (unixexpires < unixnow) {\n // already expired, skip this entry\n ctx->nexpired++;\n continue;\n }\n ttl = unixexpires-unixnow;\n }\n struct pogocache_store_opts opts = {\n .flags = flags,\n .time = now,\n .ttl = ttl,\n .cas = cas,\n };\n // printf(\"[%.*s]=[%.*s]\\n\", (int)keylen, key, (int)vallen, val);\n int ret = pogocache_store(cache, key, keylen, val, vallen, &opts);\n (void)ret;\n assert(ret == POGOCACHE_INSERTED || ret == POGOCACHE_REPLACED);\n ctx->ninserted++;\n }\n ok = true;\ndone:\n buf_clear(&block->cdata);\n xfree(ddata);\n if (!ok) {\n printf(\". 
bad block\\n\");\n }\n return ok;\n}\n\nstatic void *thload(void *arg) {\n struct loadctx *ctx = arg;\n pthread_mutex_lock(ctx->lock);\n while (1) {\n if (*ctx->failure) {\n break;\n }\n if (*ctx->nblocks > 0) {\n // Take a block for processing\n struct cblock block = ctx->blocks[(*ctx->nblocks)-1];\n (*ctx->nblocks)--;\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n ctx->ok = load_block(&block, ctx);\n pthread_mutex_lock(ctx->lock);\n if (!ctx->ok) {\n *ctx->failure = true;\n break;\n }\n // next block\n continue;\n }\n if (*ctx->donereading) {\n break;\n }\n pthread_cond_wait(ctx->cond, ctx->lock);\n }\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n if (!ctx->ok) {\n ctx->errnum = errno;\n }\n return 0;\n}\n\n// load data into cache from path\nint load(const char *path, bool fast, struct load_stats *stats) {\n // Use a single stream reader. Handing off blocks to threads.\n struct load_stats sstats;\n if (!stats) {\n stats = &sstats;\n }\n memset(stats, 0, sizeof(struct load_stats));\n\n int fd = open(path, O_RDONLY);\n if (fd == -1) {\n return -1;\n }\n\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n bool donereading = false;\n bool failure = false;\n\n int nprocs = fast ? 
sys_nprocs() : 1;\n struct loadctx *ctxs = xmalloc(nprocs*sizeof(struct loadctx));\n memset(ctxs, 0, nprocs*sizeof(struct loadctx));\n int nblocks = 0;\n struct cblock *blocks = xmalloc(sizeof(struct cblock)*nprocs);\n memset(blocks, 0, sizeof(struct cblock)*nprocs);\n int therrnum = 0;\n bool ok = true;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n ctx->lock = &lock;\n ctx->cond = &cond;\n ctx->donereading = &donereading;\n ctx->nblocks = &nblocks;\n ctx->failure = &failure;\n ctx->blocks = blocks;\n atomic_init(&ctx->ok, true);\n if (pthread_create(&ctx->th, 0, thload, ctx) == -1) {\n ctx->th = 0;\n ok = false;\n if (therrnum == 0) {\n therrnum = errno;\n }\n }\n }\n if (!ok) {\n // there was an error creating a thread. \n // At this point there may be some orphaned threads waiting on \n // a condition variable. \n goto shutdown_threads;\n }\n\n // Read the blocks from file, one at a time, handing putting blocks into\n // the 'blocks' queue. The running threads will pick these up and \n // process them in no specific order.\n struct buf cdata = { 0 };\n bool shortread = false;\n while (ok) {\n uint8_t head[16];\n ssize_t size = read(fd, head, 16);\n if (size <= 0) {\n if (size == -1) {\n ok = false;\n }\n break;\n }\n if (size < 16) {\n printf(\". bad head size\\n\");\n ok = false;\n break;\n }\n if (memcmp(head, \"POGO\", 4) != 0) {\n printf(\". missing 'POGO'\\n\");\n ok = false;\n break;\n }\n uint32_t crc;\n memcpy(&crc, head+4, 4);\n size_t dlen = read_u32(head+8);\n size_t clen = read_u32(head+12);\n buf_ensure(&cdata, clen);\n bool okread = true;\n size_t total = 0;\n while (total < clen) {\n ssize_t rlen = read(fd, cdata.data+total, clen-total);\n if (rlen <= 0) {\n shortread = true;\n okread = false;\n break;\n }\n total += rlen;\n }\n if (!okread) {\n if (shortread) {\n printf(\". 
shortread\\n\");\n }\n ok = false;\n break;\n }\n cdata.len = clen;\n stats->csize += clen;\n stats->dsize += dlen;\n uint32_t crc2 = crc32(cdata.data, clen);\n if (crc2 != crc) {\n printf(\". bad crc\\n\");\n ok = false;\n goto bdone;\n }\n // We have a good block. Push it into the queue\n pthread_mutex_lock(&lock);\n while (1) {\n if (failure) {\n // A major error occured, stop reading now\n ok = false;\n break;\n }\n if (nblocks == nprocs) {\n // Queue is currently filled up.\n // Wait and try again.\n pthread_cond_wait(&cond, &lock);\n continue;\n }\n // Add block to queue\n blocks[nblocks++] = (struct cblock){ \n .cdata = cdata,\n .dlen = dlen,\n };\n memset(&cdata, 0, sizeof(struct buf));\n pthread_cond_broadcast(&cond);\n break;\n }\n pthread_mutex_unlock(&lock);\n }\nbdone:\n buf_clear(&cdata);\n\n\nshutdown_threads:\n // Stop all threads\n pthread_mutex_lock(&lock);\n donereading = true;\n pthread_mutex_unlock(&lock);\n pthread_cond_broadcast(&cond);\n\n // Wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n stats->nexpired += ctx->nexpired;\n stats->ninserted += ctx->ninserted;\n }\n }\n\n // Get the current error, if any\n errno = 0;\n ok = ok && !failure;\n if (!ok) {\n errno = therrnum;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n if (!ctx->ok) {\n errno = ctx->errnum;\n break;\n }\n }\n }\n }\n\n // Free all resources.\n for (int i = 0; i < nblocks; i++) {\n buf_clear(&blocks[i].cdata);\n }\n xfree(blocks);\n xfree(ctxs);\n close(fd);\n return ok ? 
0 : -1;\n}\n\n// removes all work files and checks that the current directory is valid.\nbool cleanwork(const char *persist) {\n if (*persist == '\\0') {\n return false;\n }\n bool ok = false;\n char *path = xmalloc(strlen(persist)+1);\n strcpy(path, persist);\n char *dirpath = dirname(path);\n DIR *dir = opendir(dirpath);\n if (!dir) {\n perror(\"# opendir\");\n goto done;\n }\n struct dirent *entry;\n while ((entry = readdir(dir))) {\n if (entry->d_type != DT_REG) {\n continue;\n }\n const char *ext = \".pogocache.work\";\n if (strlen(entry->d_name) < strlen(ext) ||\n strcmp(entry->d_name+strlen(entry->d_name)-strlen(ext), ext) != 0)\n {\n continue;\n }\n size_t filepathcap = strlen(dirpath)+1+strlen(entry->d_name)+1;\n char *filepath = xmalloc(filepathcap);\n snprintf(filepath, filepathcap, \"%s/%s\", dirpath, entry->d_name);\n if (unlink(filepath) == 0) {\n printf(\"# deleted work file %s\\n\", filepath);\n } else {\n perror(\"# unlink\");\n }\n xfree(filepath);\n }\n ok = true;\ndone:\n if (dir) {\n closedir(dir);\n }\n xfree(path);\n return ok;\n}\n"], ["/pogocache/src/net.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit net.c provides most network functionality, including listening on ports,\n// thread creation, event queue handling, and reading & writing sockets.\n#define _GNU_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifdef __linux__\n#include \n#include \n#include \n#include \n#else\n#include \n#endif\n\n#include \"uring.h\"\n#include \"stats.h\"\n#include \"net.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"xmalloc.h\"\n\n#define PACKETSIZE 16384\n#define MINURINGEVENTS 2 // there must be at least 2 events for uring use\n\nextern const int verb;\n\nstatic int setnonblock(int fd) {\n int flags = fcntl(fd, F_GETFL, 0);\n if (flags == -1) {\n return -1;\n }\n return fcntl(fd, F_SETFL, flags | O_NONBLOCK);\n}\n\nstatic int settcpnodelay(int fd, bool nodelay) {\n int val = nodelay;\n return setsockopt(fd, SOL_SOCKET, TCP_NODELAY, &val, sizeof(val)) == 0;\n}\n\nstatic int setquickack(int fd, bool quickack) {\n#if defined(__linux__)\n int val = quickack;\n return setsockopt(fd, SOL_SOCKET, TCP_QUICKACK, &val, sizeof(val)) == 0;\n#else\n (void)fd, (void)quickack;\n return 0;\n#endif\n}\n\nstatic int setkeepalive(int fd, bool keepalive) {\n int val = keepalive;\n if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val))) {\n return -1;\n }\n#if defined(__linux__)\n if (!keepalive) {\n return 0;\n }\n // tcp_keepalive_time\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &(int){300}, sizeof(int))) \n {\n return -1;\n }\n // tcp_keepalive_intvl\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&(int){30}, sizeof(int)))\n {\n return -1;\n }\n // tcp_keepalive_probes\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &(int){3}, sizeof(int))) {\n return -1;\n }\n#endif\n return 0;\n}\n\n#ifdef __linux__\ntypedef struct epoll_event event_t;\n#else\ntypedef struct kevent event_t;\n#endif\n\nstatic int event_fd(event_t *ev) {\n#ifdef __linux__\n return ev->data.fd;\n#else\n return ev->ident;\n#endif\n}\n\nstatic int getevents(int fd, event_t evs[], int nevs, bool wait_forever, \n int64_t timeout)\n{\n if (wait_forever) {\n#ifdef __linux__\n return epoll_wait(fd, evs, nevs, -1);\n#else\n return kevent(fd, NULL, 0, evs, nevs, 0);\n#endif\n } else {\n timeout = timeout < 0 ? 0 : \n timeout > 900000000 ? 900000000 : // 900ms\n timeout;\n#ifdef __linux__\n timeout = timeout / 1000000;\n return epoll_wait(fd, evs, nevs, timeout);\n#else\n struct timespec timespec = { .tv_nsec = timeout };\n return kevent(fd, NULL, 0, evs, nevs, ×pec);\n#endif\n }\n}\n\nstatic int addread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN | EPOLLEXCLUSIVE;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int addwrite(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delwrite(int qfd, int fd) {\n#ifdef __linux__\n struct 
epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int evqueue(void) {\n#ifdef __linux__\n return epoll_create1(0);\n#else\n return kqueue();\n#endif\n}\n\nstruct bgworkctx { \n void (*work)(void *udata);\n void (*done)(struct net_conn *conn, void *udata);\n struct net_conn *conn;\n void *udata;\n bool writer;\n};\n\n// static void bgdone(struct bgworkctx *bgctx);\n\nstruct net_conn {\n int fd;\n struct net_conn *next; // for hashmap bucket\n bool closed;\n struct tls *tls;\n void *udata;\n char *out;\n size_t outlen;\n size_t outcap;\n struct bgworkctx *bgctx;\n struct qthreadctx *ctx;\n unsigned stat_cmd_get;\n unsigned stat_cmd_set;\n unsigned stat_get_hits;\n unsigned stat_get_misses;\n};\n\nstatic struct net_conn *conn_new(int fd, struct qthreadctx *ctx) {\n struct net_conn *conn = xmalloc(sizeof(struct net_conn));\n memset(conn, 0, sizeof(struct net_conn));\n conn->fd = fd;\n conn->ctx = ctx;\n return conn;\n}\n\nstatic void conn_free(struct net_conn *conn) {\n if (conn) {\n if (conn->out) {\n xfree(conn->out);\n }\n xfree(conn);\n }\n}\n\nvoid net_conn_out_ensure(struct net_conn *conn, size_t amount) {\n if (conn->outcap-conn->outlen >= amount) {\n return;\n }\n size_t cap = conn->outcap == 0 ? 
16 : conn->outcap * 2;\n while (cap-conn->outlen < amount) {\n cap *= 2;\n }\n char *out = xmalloc(cap);\n memcpy(out, conn->out, conn->outlen);\n xfree(conn->out);\n conn->out = out;\n conn->outcap = cap;\n}\n\nvoid net_conn_out_write_byte_nocheck(struct net_conn *conn, char byte) {\n conn->out[conn->outlen++] = byte;\n}\n\nvoid net_conn_out_write_byte(struct net_conn *conn, char byte) {\n if (conn->outcap == conn->outlen) {\n net_conn_out_ensure(conn, 1);\n }\n net_conn_out_write_byte_nocheck(conn, byte);\n}\n\nvoid net_conn_out_write_nocheck(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n memcpy(conn->out+conn->outlen, data, nbytes);\n conn->outlen += nbytes;\n}\n\nvoid net_conn_out_write(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n if (conn->outcap-conn->outlen < nbytes) {\n net_conn_out_ensure(conn, nbytes);\n }\n net_conn_out_write_nocheck(conn, data, nbytes);\n}\n\nchar *net_conn_out(struct net_conn *conn) {\n return conn->out;\n}\n\nsize_t net_conn_out_len(struct net_conn *conn) {\n return conn->outlen;\n}\n\nsize_t net_conn_out_cap(struct net_conn *conn) {\n return conn->outcap;\n}\n\nvoid net_conn_out_setlen(struct net_conn *conn, size_t len) {\n assert(len < conn->outcap);\n conn->outlen = len;\n}\n\n\nbool net_conn_isclosed(struct net_conn *conn) {\n return conn->closed;\n}\n\nvoid net_conn_close(struct net_conn *conn) {\n conn->closed = true;\n}\n\nvoid net_conn_setudata(struct net_conn *conn, void *udata) {\n conn->udata = udata;\n}\n\nvoid *net_conn_udata(struct net_conn *conn) {\n return conn->udata;\n}\n\nstatic uint64_t hashfd(int fd) {\n return mix13((uint64_t)fd);\n}\n\n// map of connections\nstruct cmap {\n struct net_conn **buckets;\n size_t nbuckets;\n size_t len;\n};\n\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn);\n\nstatic void cmap_grow(struct cmap *cmap) {\n struct cmap cmap2 = { 0 };\n cmap2.nbuckets = cmap->nbuckets*2;\n size_t size = cmap2.nbuckets * sizeof(struct net_conn*);\n 
cmap2.buckets = xmalloc(size);\n memset(cmap2.buckets, 0, cmap2.nbuckets*sizeof(struct net_conn*));\n for (size_t i = 0; i < cmap->nbuckets; i++) {\n struct net_conn *conn = cmap->buckets[i];\n while (conn) {\n struct net_conn *next = conn->next;\n conn->next = 0;\n cmap_insert(&cmap2, conn);\n conn = next;\n }\n }\n xfree(cmap->buckets);\n memcpy(cmap, &cmap2, sizeof(struct cmap));\n}\n\n// Insert a connection into a map. \n// The connection MUST NOT exist in the map.\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n if (cmap->len >= cmap->nbuckets-(cmap->nbuckets>>2)) { // 75% load factor\n // if (cmap->len >= cmap->nbuckets) { // 100% load factor\n cmap_grow(cmap);\n }\n size_t i = hash % cmap->nbuckets;\n conn->next = cmap->buckets[i];\n cmap->buckets[i] = conn;\n cmap->len++;\n}\n\n// Return the connection or NULL if not exists.\nstatic struct net_conn *cmap_get(struct cmap *cmap, int fd) {\n uint32_t hash = hashfd(fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *conn = cmap->buckets[i];\n while (conn && conn->fd != fd) {\n conn = conn->next;\n }\n return conn;\n}\n\n// Delete connection from map. 
\n// The connection MUST exist in the map.\nstatic void cmap_delete(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *prev = 0;\n struct net_conn *iter = cmap->buckets[i];\n while (iter != conn) {\n prev = iter;\n iter = iter->next;\n }\n if (prev) {\n prev->next = iter->next;\n } else {\n cmap->buckets[i] = iter->next;\n }\n}\n\nstatic atomic_size_t nconns = 0;\nstatic atomic_size_t tconns = 0;\nstatic atomic_size_t rconns = 0;\n\nstatic pthread_mutex_t tls_ready_fds_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic int tls_ready_fds_cap = 0;\nstatic int tls_ready_fds_len = 0;\nstatic int *tls_ready_fds = 0;\n\nstatic void save_tls_fd(int fd) {\n pthread_mutex_lock(&tls_ready_fds_lock);\n if (tls_ready_fds_len == tls_ready_fds_cap) {\n tls_ready_fds_cap *= 2;\n if (tls_ready_fds_cap == 0) {\n tls_ready_fds_cap = 8;\n }\n tls_ready_fds = xrealloc(tls_ready_fds, tls_ready_fds_cap*sizeof(int));\n }\n tls_ready_fds[tls_ready_fds_len++] = fd;\n pthread_mutex_unlock(&tls_ready_fds_lock);\n}\n\nstatic bool del_tls_fd(int fd) {\n bool found = false;\n pthread_mutex_lock(&tls_ready_fds_lock);\n for (int i = 0; i < tls_ready_fds_len; i++) {\n if (tls_ready_fds[i] == fd) {\n tls_ready_fds[i] = tls_ready_fds[tls_ready_fds_len-1];\n tls_ready_fds_len--;\n found = true;\n break;\n }\n }\n pthread_mutex_unlock(&tls_ready_fds_lock);\n return found;\n}\n\nstruct qthreadctx {\n pthread_t th;\n int qfd;\n int index;\n int maxconns;\n int *sfd; // three entries\n bool tcpnodelay;\n bool keepalive;\n bool quickack;\n int queuesize;\n const char *unixsock;\n void *udata;\n bool uring;\n#ifndef NOURING\n struct io_uring ring;\n#endif\n void(*data)(struct net_conn*,const void*,size_t,void*);\n void(*opened)(struct net_conn*,void*);\n void(*closed)(struct net_conn*,void*);\n int nevents;\n event_t *events;\n atomic_int nconns;\n int ntlsconns;\n char *inpkts;\n struct net_conn **qreads;\n struct net_conn 
**qins;\n struct net_conn **qattachs;\n struct net_conn **qouts;\n struct net_conn **qcloses;\n char **qinpkts;\n int *qinpktlens; \n int nqreads;\n int nqins;\n int nqcloses;\n int nqattachs;\n int nqouts;\n int nthreads;\n \n uint64_t stat_cmd_get;\n uint64_t stat_cmd_set;\n uint64_t stat_get_hits;\n uint64_t stat_get_misses;\n\n struct qthreadctx *ctxs;\n struct cmap cmap;\n};\n\nstatic atomic_uint_fast64_t g_stat_cmd_get = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_set = 0;\nstatic atomic_uint_fast64_t g_stat_get_hits = 0;\nstatic atomic_uint_fast64_t g_stat_get_misses = 0;\n\ninline\nstatic void sumstats(struct net_conn *conn, struct qthreadctx *ctx) {\n ctx->stat_cmd_get += conn->stat_cmd_get;\n conn->stat_cmd_get = 0;\n ctx->stat_cmd_set += conn->stat_cmd_set;\n conn->stat_cmd_set = 0;\n ctx->stat_get_hits += conn->stat_get_hits;\n conn->stat_get_hits = 0;\n ctx->stat_get_misses += conn->stat_get_misses;\n conn->stat_get_misses = 0;\n}\n\ninline\nstatic void sumstats_global(struct qthreadctx *ctx) {\n atomic_fetch_add_explicit(&g_stat_cmd_get, ctx->stat_cmd_get, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_get = 0;\n atomic_fetch_add_explicit(&g_stat_cmd_set, ctx->stat_cmd_set, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_set = 0;\n atomic_fetch_add_explicit(&g_stat_get_hits, ctx->stat_get_hits, \n __ATOMIC_RELAXED);\n ctx->stat_get_hits = 0;\n atomic_fetch_add_explicit(&g_stat_get_misses, ctx->stat_get_misses, \n __ATOMIC_RELAXED);\n ctx->stat_get_misses = 0;\n}\n\nuint64_t stat_cmd_get(void) {\n uint64_t x = atomic_load_explicit(&g_stat_cmd_get, __ATOMIC_RELAXED);\n return x;\n}\n\nuint64_t stat_cmd_set(void) {\n return atomic_load_explicit(&g_stat_cmd_set, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_hits(void) {\n return atomic_load_explicit(&g_stat_get_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_misses(void) {\n return atomic_load_explicit(&g_stat_get_misses, __ATOMIC_RELAXED);\n}\n\ninline\nstatic void qreset(struct qthreadctx *ctx) {\n ctx->nqreads = 0;\n 
ctx->nqins = 0;\n ctx->nqcloses = 0;\n ctx->nqouts = 0;\n ctx->nqattachs = 0;\n}\n\ninline\nstatic void qaccept(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nevents; i++) {\n int fd = event_fd(&ctx->events[i]);\n struct net_conn *conn = cmap_get(&ctx->cmap, fd);\n if (!conn) {\n if ((fd == ctx->sfd[0] || fd == ctx->sfd[1] || fd == ctx->sfd[2])) {\n int sfd = fd;\n fd = accept(fd, 0, 0);\n if (fd == -1) {\n continue;\n }\n if (setnonblock(fd) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[0] || sfd == ctx->sfd[2]) {\n if (setkeepalive(fd, ctx->keepalive) == -1) {\n close(fd);\n continue;\n }\n if (settcpnodelay(fd, ctx->tcpnodelay) == -1) {\n close(fd);\n continue;\n }\n if (setquickack(fd, ctx->quickack) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[2]) {\n save_tls_fd(fd);\n }\n }\n static atomic_uint_fast64_t next_ctx_index = 0;\n int idx = atomic_fetch_add(&next_ctx_index, 1) % ctx->nthreads;\n if (addread(ctx->ctxs[idx].qfd, fd) == -1) {\n if (sfd == ctx->sfd[2]) {\n del_tls_fd(fd);\n }\n close(fd);\n continue;\n }\n continue;\n }\n size_t xnconns = atomic_fetch_add(&nconns, 1);\n if (xnconns >= (size_t)ctx->maxconns) {\n // rejected\n atomic_fetch_add(&rconns, 1);\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n continue;\n }\n bool istls = del_tls_fd(fd);\n conn = conn_new(fd, ctx);\n if (istls) {\n if (!tls_accept(conn->fd, &conn->tls)) {\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n conn_free(conn);\n continue;\n }\n ctx->ntlsconns++;\n }\n atomic_fetch_add_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_add_explicit(&tconns, 1, __ATOMIC_RELEASE);\n cmap_insert(&ctx->cmap, conn);\n ctx->opened(conn, ctx->udata);\n }\n if (conn->bgctx) {\n // BGWORK(2)\n // The connection has been added back to the event loop, but it\n // needs to be attached and restated.\n ctx->qattachs[ctx->nqattachs++] = conn;\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n 
ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void handle_read(ssize_t n, char *pkt, struct net_conn *conn,\n struct qthreadctx *ctx)\n{\n assert(conn->outlen == 0);\n assert(conn->bgctx == 0);\n if (n <= 0) {\n if (n == 0 || errno != EAGAIN) {\n // read failed, close connection\n ctx->qcloses[ctx->nqcloses++] = conn;\n return;\n }\n assert(n == -1 && errno == EAGAIN);\n // even though there's an EAGAIN, still call the user data event\n // handler with an empty packet \n n = 0;\n }\n pkt[n] = '\\0';\n ctx->qins[ctx->nqins] = conn;\n ctx->qinpkts[ctx->nqins] = pkt;\n ctx->qinpktlens[ctx->nqins] = n;\n ctx->nqins++;\n}\n\ninline \nstatic void flush_conn(struct net_conn *conn, size_t written) {\n while (written < conn->outlen) {\n ssize_t n;\n if (conn->tls) {\n n = tls_write(conn->tls, conn->fd, conn->out+written, \n conn->outlen-written);\n } else {\n n = write(conn->fd, conn->out+written, conn->outlen-written);\n }\n if (n == -1) {\n if (errno == EAGAIN) {\n continue;\n }\n conn->closed = true;\n break;\n }\n written += n;\n }\n // either everything was written or the socket is closed\n conn->outlen = 0;\n}\n\ninline\nstatic void qattach(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nqattachs; i++) {\n // BGWORK(3)\n // A bgworker has finished, make sure it's added back into the \n // event loop in the correct state.\n struct net_conn *conn = ctx->qattachs[i];\n struct bgworkctx *bgctx = conn->bgctx;\n bgctx->done(conn, bgctx->udata);\n conn->bgctx = 0;\n assert(bgctx);\n xfree(bgctx);\n int ret = delwrite(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n ret = addread(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void qread(struct qthreadctx *ctx) {\n // Read incoming socket data\n#ifndef NOURING\n if 
(ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // read incoming using uring\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_read(sqe, conn->fd, pkt, PACKETSIZE-1, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n assert(ret == ctx->nqreads);\n for (int i = 0; i < ctx->nqreads; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n = cqe->res;\n if (n < 0) {\n errno = -n;\n n = -1;\n }\n handle_read(n, pkt, conn, ctx);\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // read incoming data using standard syscalls.\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n;\n if (conn->tls) {\n n = tls_read(conn->tls, conn->fd, pkt, PACKETSIZE-1);\n } else {\n n = read(conn->fd, pkt, PACKETSIZE-1);\n }\n handle_read(n, pkt, conn, ctx);\n }\n#ifndef NOURING\n }\n#endif\n}\n\n\ninline\nstatic void qprocess(struct qthreadctx *ctx) {\n // process all new incoming data\n for (int i = 0; i < ctx->nqins; i++) {\n struct net_conn *conn = ctx->qins[i];\n char *p = ctx->qinpkts[i];\n int n = ctx->qinpktlens[i];\n ctx->data(conn, p, n, ctx->udata);\n sumstats(conn, ctx);\n if (conn->bgctx) {\n // BGWORK(1)\n // Connection entered background mode.\n // This means the connection is no longer in the event queue but\n // is still owned by this qthread. 
Once the bgwork is done the \n // connection will be added back to the queue with addwrite.\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n}\n\ninline\nstatic void qprewrite(struct qthreadctx *ctx) {\n (void)ctx;\n // TODO: perform any prewrite operations\n}\n\ninline\nstatic void qwrite(struct qthreadctx *ctx) {\n // Flush all outgoing socket data.\n#ifndef NOURING\n if (ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // write outgoing using uring\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_write(sqe, conn->fd, conn->out, conn->outlen, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n for (int i = 0; i < ctx->nqouts; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qouts[i];\n ssize_t n = cqe->res;\n if (n == -EAGAIN) {\n n = 0;\n }\n if (n < 0) {\n conn->closed = true;\n } else {\n // Any extra data must be flushed using syscall write.\n flush_conn(conn, n);\n }\n // Either everything was written or the socket is closed\n conn->outlen = 0;\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // Write data using write syscall\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n#ifndef NOURING\n }\n#endif\n}\n\ninline\nstatic void qclose(struct qthreadctx *ctx) {\n // Close all sockets that need to be closed\n for (int i = 0; i < ctx->nqcloses; i++) {\n struct net_conn *conn = ctx->qcloses[i];\n ctx->closed(conn, ctx->udata);\n if 
(conn->tls) {\n tls_close(conn->tls, conn->fd);\n ctx->ntlsconns--;\n } else {\n close(conn->fd);\n }\n cmap_delete(&ctx->cmap, conn);\n atomic_fetch_sub_explicit(&nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_sub_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n conn_free(conn);\n }\n}\n\nstatic void *qthread(void *arg) {\n struct qthreadctx *ctx = arg;\n#ifndef NOURING\n if (ctx->uring) {\n if (io_uring_queue_init(ctx->queuesize, &ctx->ring, 0) < 0) {\n perror(\"# io_uring_queue_init\");\n abort();\n }\n }\n#endif\n // connection map\n memset(&ctx->cmap, 0, sizeof(struct cmap));\n ctx->cmap.nbuckets = 64;\n size_t size = ctx->cmap.nbuckets*sizeof(struct net_conn*);\n ctx->cmap.buckets = xmalloc(size);\n memset(ctx->cmap.buckets, 0, ctx->cmap.nbuckets*sizeof(struct net_conn*));\n\n ctx->events = xmalloc(sizeof(event_t)*ctx->queuesize);\n ctx->qreads = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->inpkts = xmalloc(PACKETSIZE*ctx->queuesize);\n ctx->qins = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qinpkts = xmalloc(sizeof(char*)*ctx->queuesize);\n ctx->qinpktlens = xmalloc(sizeof(int)*ctx->queuesize);\n ctx->qcloses = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qouts = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qattachs = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n\n while (1) {\n sumstats_global(ctx);\n ctx->nevents = getevents(ctx->qfd, ctx->events, ctx->queuesize, 1, 0);\n if (ctx->nevents <= 0) {\n if (ctx->nevents == -1 && errno != EINTR) {\n perror(\"# getevents\");\n abort();\n }\n continue;\n }\n // reset, accept, attach, read, process, prewrite, write, close\n qreset(ctx); // reset the step queues\n qaccept(ctx); // accept incoming connections\n qattach(ctx); // attach bg workers. 
uncommon\n qread(ctx); // read from sockets\n qprocess(ctx); // process new socket data\n qprewrite(ctx); // perform any prewrite operations, such as fsync\n qwrite(ctx); // write to sockets\n qclose(ctx); // close any sockets that need closing\n }\n return 0;\n}\n\nstatic int listen_tcp(const char *host, const char *port, bool reuseport, \n int backlog)\n{\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return 0;\n }\n int ret;\n host = host ? host : \"127.0.0.1\";\n port = port ? port : \"0\";\n struct addrinfo hints = { 0 }, *addrs;\n hints.ai_family = AF_UNSPEC; \n hints.ai_socktype = SOCK_STREAM;\n hints.ai_protocol = IPPROTO_TCP;\n ret = getaddrinfo(host, port, &hints, &addrs);\n if (ret != 0) {\n fprintf(stderr, \"# getaddrinfo: %s: %s:%s\", gai_strerror(ret), host,\n port);\n abort();\n }\n struct addrinfo *ainfo = addrs;\n while (ainfo->ai_family != PF_INET) {\n ainfo = ainfo->ai_next;\n }\n assert(ainfo);\n int fd = socket(ainfo->ai_family, ainfo->ai_socktype, ainfo->ai_protocol);\n if (fd == -1) {\n perror(\"# socket(tcp)\");\n abort();\n }\n if (reuseport) {\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, \n sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseport)\");\n abort();\n }\n }\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &(int){1},sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseaddr)\");\n abort();\n }\n ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n ret = bind(fd, ainfo->ai_addr, ainfo->ai_addrlen);\n if (ret == -1) {\n fprintf(stderr, \"# bind(tcp): %s:%s\", host, port);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(tcp): %s:%s\", host, port);\n abort();\n }\n freeaddrinfo(addrs);\n return fd;\n}\n\nstatic int listen_unixsock(const char *unixsock, int backlog) {\n if (!unixsock || !*unixsock) {\n return 0;\n }\n struct sockaddr_un unaddr;\n int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n if (fd == -1) {\n 
perror(\"# socket(unix)\");\n abort();\n }\n memset(&unaddr, 0, sizeof(struct sockaddr_un));\n unaddr.sun_family = AF_UNIX;\n strncpy(unaddr.sun_path, unixsock, sizeof(unaddr.sun_path) - 1);\n int ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n unlink(unixsock);\n ret = bind(fd, (struct sockaddr *)&unaddr, sizeof(struct sockaddr_un));\n if (ret == -1) {\n fprintf(stderr, \"# bind(unix): %s\", unixsock);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(unix): %s\", unixsock);\n abort();\n }\n return fd;\n}\n\nstatic atomic_uintptr_t all_ctxs = 0;\n\n// current connections\nsize_t net_nconns(void) {\n return atomic_load_explicit(&nconns, __ATOMIC_ACQUIRE);\n}\n\n// total connections ever\nsize_t net_tconns(void) {\n return atomic_load_explicit(&tconns, __ATOMIC_ACQUIRE);\n}\n\n// total rejected connections ever\nsize_t net_rconns(void) {\n return atomic_load_explicit(&rconns, __ATOMIC_ACQUIRE);\n}\n\nstatic void warmupunix(const char *unixsock, int nsocks) {\n if (!unixsock || !*unixsock) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n socks[i] = socket(AF_UNIX, SOCK_STREAM, 0);\n if (socks[i] == -1) {\n socks[i] = 0;\n continue;\n }\n struct sockaddr_un addr;\n memset(&addr, 0, sizeof(struct sockaddr_un));\n addr.sun_family = AF_UNIX;\n strncpy(addr.sun_path, unixsock, sizeof(addr.sun_path) - 1);\n if (connect(socks[i], (struct sockaddr *)&addr, \n sizeof(struct sockaddr_un)) == -1)\n {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup unix socket (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\n\nstatic void warmuptcp(const char *host, const char *port, int nsocks) {\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n struct addrinfo hints, *res;\n memset(&hints, 0, sizeof(hints));\n hints.ai_family = AF_INET;\n hints.ai_socktype = SOCK_STREAM;\n int err = getaddrinfo(host, port, &hints, &res);\n if (err != 0) {\n continue;\n }\n socks[i] = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n if (socks[i] == -1) {\n freeaddrinfo(res);\n continue;\n }\n int ret = connect(socks[i], res->ai_addr, res->ai_addrlen);\n freeaddrinfo(res);\n if (ret == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup tcp (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\nstatic void *thwarmup(void *arg) {\n // Perform a warmup of the epoll queues and listeners by making a quick\n // connection to each.\n struct net_opts *opts = arg;\n warmupunix(opts->unixsock, opts->nthreads*2);\n warmuptcp(opts->host, opts->port, opts->nthreads*2);\n return 0;\n}\n\nvoid net_main(struct net_opts *opts) {\n (void)delread;\n int sfd[3] = {\n listen_tcp(opts->host, opts->port, opts->reuseport, opts->backlog),\n listen_unixsock(opts->unixsock, opts->backlog),\n listen_tcp(opts->host, opts->tlsport, opts->reuseport, opts->backlog),\n };\n if (!sfd[0] && !sfd[1] && !sfd[2]) {\n printf(\"# No listeners provided\\n\");\n abort();\n }\n opts->listening(opts->udata);\n struct qthreadctx *ctxs = xmalloc(sizeof(struct qthreadctx)*opts->nthreads);\n memset(ctxs, 0, sizeof(struct qthreadctx)*opts->nthreads);\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n ctx->nthreads = opts->nthreads;\n ctx->tcpnodelay = opts->tcpnodelay;\n ctx->keepalive = opts->keepalive;\n ctx->quickack = opts->quickack;\n ctx->uring = !opts->nouring;\n ctx->ctxs = ctxs;\n ctx->index = i;\n ctx->maxconns = opts->maxconns;\n ctx->sfd = sfd;\n ctx->data = opts->data;\n ctx->udata = opts->udata;\n ctx->opened = opts->opened;\n ctx->closed = opts->closed;\n ctx->qfd = evqueue();\n if (ctx->qfd == -1) {\n perror(\"# evqueue\");\n abort();\n }\n atomic_init(&ctx->nconns, 0);\n for (int j = 0; j < 3; j++) {\n if (sfd[j]) {\n int ret = addread(ctx->qfd, sfd[j]);\n if (ret == -1) {\n perror(\"# addread\");\n abort();\n }\n }\n }\n ctx->unixsock = opts->unixsock;\n ctx->queuesize = opts->queuesize;\n }\n atomic_store(&all_ctxs, (uintptr_t)(void*)ctxs);\n opts->ready(opts->udata);\n if (!opts->nowarmup) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thwarmup, opts);\n if (ret != -1) {\n pthread_detach(th);\n }\n }\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n 
if (i == opts->nthreads-1) {\n qthread(ctx);\n } else {\n int ret = pthread_create(&ctx->th, 0, qthread, ctx);\n if (ret == -1) {\n perror(\"# pthread_create\");\n abort();\n }\n }\n }\n}\n\nstatic void *bgwork(void *arg) {\n struct bgworkctx *bgctx = arg;\n bgctx->work(bgctx->udata);\n // We are not in the same thread context as the event loop that owns this\n // connection. Adding the writer to the queue will allow for the loop\n // thread to gracefully continue the operation and then call the 'done'\n // callback.\n int ret = addwrite(bgctx->conn->ctx->qfd, bgctx->conn->fd);\n assert(ret == 0); (void)ret;\n return 0;\n}\n\n// net_conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool net_conn_bgwork(struct net_conn *conn, void (*work)(void *udata), \n void (*done)(struct net_conn *conn, void *udata), void *udata)\n{\n if (conn->bgctx || conn->closed) {\n return false;\n }\n struct qthreadctx *ctx = conn->ctx;\n int ret = delread(ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n conn->bgctx = xmalloc(sizeof(struct bgworkctx));\n memset(conn->bgctx, 0, sizeof(struct bgworkctx));\n conn->bgctx->conn = conn;\n conn->bgctx->done = done;\n conn->bgctx->work = work;\n conn->bgctx->udata = udata;\n pthread_t th;\n if (pthread_create(&th, 0, bgwork, conn->bgctx) == -1) {\n // Failed to create thread. 
Revert and return false.\n ret = addread(ctx->qfd, conn->fd);\n assert(ret == 0);\n xfree(conn->bgctx);\n conn->bgctx = 0;\n return false;\n } else {\n pthread_detach(th);\n }\n return true;\n}\n\nbool net_conn_bgworking(struct net_conn *conn) {\n return conn->bgctx != 0;\n}\n\nvoid net_stat_cmd_get_incr(struct net_conn *conn) {\n conn->stat_cmd_get++;\n}\n\nvoid net_stat_cmd_set_incr(struct net_conn *conn) {\n conn->stat_cmd_set++;\n}\n\nvoid net_stat_get_hits_incr(struct net_conn *conn) {\n conn->stat_get_hits++;\n}\n\nvoid net_stat_get_misses_incr(struct net_conn *conn) {\n conn->stat_get_misses++;\n}\n\nbool net_conn_istls(struct net_conn *conn) {\n return conn->tls != 0;\n}\n"], ["/pogocache/src/args.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit args.c provides functions for managing command arguments\n#include \n#include \n#include \n#include \"args.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n\nconst char *args_at(struct args *args, int idx, size_t *len) {\n *len = args->bufs[idx].len;\n return args->bufs[idx].data;\n}\n\nint args_count(struct args *args) {\n return args->len;\n}\n\nbool args_eq(struct args *args, int index, const char *str) {\n if ((size_t)index >= args->len) {\n return false;\n }\n size_t alen = args->bufs[index].len;\n const char *arg = args->bufs[index].data;\n size_t slen = strlen(str); \n if (alen != slen) {\n return false;\n }\n for (size_t i = 0; i < slen ; i++) {\n if (tolower(str[i]) != tolower(arg[i])) {\n return false;\n }\n }\n return true;\n}\n\nvoid args_append(struct args *args, const char *data, size_t len,\n bool zerocopy)\n{\n#ifdef NOZEROCOPY\n zerocopy = 0;\n#endif\n if (args->len == 
args->cap) {\n args->cap = args->cap == 0 ? 4 : args->cap*2;\n args->bufs = xrealloc(args->bufs, args->cap * sizeof(struct buf));\n memset(&args->bufs[args->len], 0, (args->cap-args->len) * \n sizeof(struct buf));\n }\n if (zerocopy) {\n buf_clear(&args->bufs[args->len]);\n args->bufs[args->len].len = len;\n args->bufs[args->len].data = (char*)data;\n } else {\n args->bufs[args->len].len = 0;\n buf_append(&args->bufs[args->len], data, len);\n }\n if (args->len == 0) {\n args->zerocopy = zerocopy;\n } else {\n args->zerocopy = args->zerocopy && zerocopy;\n }\n args->len++;\n}\n\nvoid args_clear(struct args *args) {\n if (!args->zerocopy) {\n for (size_t i = 0 ; i < args->len; i++) {\n buf_clear(&args->bufs[i]);\n }\n }\n args->len = 0;\n}\n\nvoid args_free(struct args *args) {\n args_clear(args);\n xfree(args->bufs);\n}\n\nvoid args_print(struct args *args) {\n printf(\". \");\n for (size_t i = 0; i < args->len; i++) {\n char *buf = args->bufs[i].data;\n int len = args->bufs[i].len;\n printf(\"[\"); \n binprint(buf, len);\n printf(\"] \");\n }\n printf(\"\\n\");\n}\n\n// remove the first item\nvoid args_remove_first(struct args *args) {\n if (args->len > 0) {\n buf_clear(&args->bufs[0]);\n for (size_t i = 1; i < args->len; i++) {\n args->bufs[i-1] = args->bufs[i];\n }\n args->len--;\n }\n}\n"], ["/pogocache/src/lz4.c", "/*\n LZ4 - Fast LZ compression algorithm\n Copyright (C) 2011-2023, Yann Collet.\n\n BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following disclaimer\n in the documentation and/or other materials provided with the\n 
distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n You can contact the author at :\n - LZ4 homepage : http://www.lz4.org\n - LZ4 source repository : https://github.com/lz4/lz4\n*/\n\n/*-************************************\n* Tuning parameters\n**************************************/\n/*\n * LZ4_HEAPMODE :\n * Select how stateless compression functions like `LZ4_compress_default()`\n * allocate memory for their hash table,\n * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).\n */\n#ifndef LZ4_HEAPMODE\n# define LZ4_HEAPMODE 0\n#endif\n\n/*\n * LZ4_ACCELERATION_DEFAULT :\n * Select \"acceleration\" for LZ4_compress_fast() when parameter value <= 0\n */\n#define LZ4_ACCELERATION_DEFAULT 1\n/*\n * LZ4_ACCELERATION_MAX :\n * Any \"acceleration\" value higher than this threshold\n * get treated as LZ4_ACCELERATION_MAX instead (fix #876)\n */\n#define LZ4_ACCELERATION_MAX 65537\n\n\n/*-************************************\n* CPU Feature Detection\n**************************************/\n/* LZ4_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n 
* The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n * It can generate buggy code on targets which assembly generation depends on alignment.\n * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */\n# if defined(__GNUC__) && \\\n ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \\\n || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n# define LZ4_FORCE_MEMORY_ACCESS 2\n# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)\n# define LZ4_FORCE_MEMORY_ACCESS 1\n# endif\n#endif\n\n/*\n * LZ4_FORCE_SW_BITCOUNT\n * Define this parameter if your target system or compiler does not support hardware bit count\n */\n#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */\n# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */\n# define LZ4_FORCE_SW_BITCOUNT\n#endif\n\n\n\n/*-************************************\n* Dependency\n**************************************/\n/*\n * LZ4_SRC_INCLUDED:\n * Amalgamation flag, whether lz4.c is included\n */\n#ifndef LZ4_SRC_INCLUDED\n# define LZ4_SRC_INCLUDED 1\n#endif\n\n#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS\n# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */\n#endif\n\n#ifndef LZ4_STATIC_LINKING_ONLY\n# define 
LZ4_STATIC_LINKING_ONLY\n#endif\n#include \"lz4.h\"\n/* see also \"memory routines\" below */\n\n\n/*-************************************\n* Compiler Options\n**************************************/\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */\n# include /* only present in VS2005+ */\n# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */\n# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */\n# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */\n#endif /* _MSC_VER */\n\n#ifndef LZ4_FORCE_INLINE\n# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */\n# define LZ4_FORCE_INLINE static __forceinline\n# else\n# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */\n# if defined (__GNUC__) || defined (__clang__)\n# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))\n# else\n# define LZ4_FORCE_INLINE static inline\n# endif\n# else\n# define LZ4_FORCE_INLINE static\n# endif /* __STDC_VERSION__ */\n# endif /* _MSC_VER */\n#endif /* LZ4_FORCE_INLINE */\n\n/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE\n * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,\n * together with a simple 8-byte copy loop as a fall-back path.\n * However, this optimization hurts the decompression speed by >30%,\n * because the execution does not go to the optimized loop\n * for typical compressible data, and all of the preamble checks\n * before going to the fall-back path become useless overhead.\n * This optimization happens only with the -O3 flag, and -O2 generates\n * a simple 8-byte copy loop.\n * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8\n * 
functions are annotated with __attribute__((optimize(\"O2\"))),\n * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute\n * of LZ4_wildCopy8 does not affect the compression speed.\n */\n#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)\n# define LZ4_FORCE_O2 __attribute__((optimize(\"O2\")))\n# undef LZ4_FORCE_INLINE\n# define LZ4_FORCE_INLINE static __inline __attribute__((optimize(\"O2\"),always_inline))\n#else\n# define LZ4_FORCE_O2\n#endif\n\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)\n# define expect(expr,value) (__builtin_expect ((expr),(value)) )\n#else\n# define expect(expr,value) (expr)\n#endif\n\n#ifndef likely\n#define likely(expr) expect((expr) != 0, 1)\n#endif\n#ifndef unlikely\n#define unlikely(expr) expect((expr) != 0, 0)\n#endif\n\n/* Should the alignment test prove unreliable, for some reason,\n * it can be disabled by setting LZ4_ALIGN_TEST to 0 */\n#ifndef LZ4_ALIGN_TEST /* can be externally provided */\n# define LZ4_ALIGN_TEST 1\n#endif\n\n\n/*-************************************\n* Memory routines\n**************************************/\n\n/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :\n * Disable relatively high-level LZ4/HC functions that use dynamic memory\n * allocation functions (malloc(), calloc(), free()).\n *\n * Note that this is a compile-time switch. 
And since it disables\n * public/stable LZ4 v1 API functions, we don't recommend using this\n * symbol to generate a library for distribution.\n *\n * The following public functions are removed when this symbol is defined.\n * - lz4 : LZ4_createStream, LZ4_freeStream,\n * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)\n * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,\n * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)\n * - lz4frame, lz4file : All LZ4F_* functions\n */\n#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\n# define ALLOC(s) lz4_error_memory_allocation_is_disabled\n# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled\n# define FREEMEM(p) lz4_error_memory_allocation_is_disabled\n#elif defined(LZ4_USER_MEMORY_FUNCTIONS)\n/* memory management functions can be customized by user project.\n * Below functions must exist somewhere in the Project\n * and be available at link time */\nvoid* LZ4_malloc(size_t s);\nvoid* LZ4_calloc(size_t n, size_t s);\nvoid LZ4_free(void* p);\n# define ALLOC(s) LZ4_malloc(s)\n# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)\n# define FREEMEM(p) LZ4_free(p)\n#else\n# include /* malloc, calloc, free */\n# define ALLOC(s) malloc(s)\n# define ALLOC_AND_ZERO(s) calloc(1,s)\n# define FREEMEM(p) free(p)\n#endif\n\n#if ! 
LZ4_FREESTANDING\n# include /* memset, memcpy */\n#endif\n#if !defined(LZ4_memset)\n# define LZ4_memset(p,v,s) memset((p),(v),(s))\n#endif\n#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))\n\n\n/*-************************************\n* Common Constants\n**************************************/\n#define MINMATCH 4\n\n#define WILDCOPYLENGTH 8\n#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */\n#define FASTLOOP_SAFE_DISTANCE 64\nstatic const int LZ4_minLength = (MFLIMIT+1);\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define LZ4_DISTANCE_ABSOLUTE_MAX 65535\n#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */\n# error \"LZ4_DISTANCE_MAX is too big : must be <= 65535\"\n#endif\n\n#define ML_BITS 4\n#define ML_MASK ((1U<=1)\n# include \n#else\n# ifndef assert\n# define assert(condition) ((void)0)\n# endif\n#endif\n\n#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */\n\n#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)\n# include \n static int g_debuglog_enable = 1;\n# define DEBUGLOG(l, ...) { \\\n if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \\\n fprintf(stderr, __FILE__ \" %i: \", __LINE__); \\\n fprintf(stderr, __VA_ARGS__); \\\n fprintf(stderr, \" \\n\"); \\\n } }\n#else\n# define DEBUGLOG(l, ...) 
{} /* disabled */\n#endif\n\nstatic int LZ4_isAligned(const void* ptr, size_t alignment)\n{\n return ((size_t)ptr & (alignment -1)) == 0;\n}\n\n\n/*-************************************\n* Types\n**************************************/\n#include \n#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include \n typedef uint8_t BYTE;\n typedef uint16_t U16;\n typedef uint32_t U32;\n typedef int32_t S32;\n typedef uint64_t U64;\n typedef uintptr_t uptrval;\n#else\n# if UINT_MAX != 4294967295UL\n# error \"LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4\"\n# endif\n typedef unsigned char BYTE;\n typedef unsigned short U16;\n typedef unsigned int U32;\n typedef signed int S32;\n typedef unsigned long long U64;\n typedef size_t uptrval; /* generally true, except OpenVMS-64 */\n#endif\n\n#if defined(__x86_64__)\n typedef U64 reg_t; /* 64-bits in x32 mode */\n#else\n typedef size_t reg_t; /* 32-bits in x32 mode */\n#endif\n\ntypedef enum {\n notLimited = 0,\n limitedOutput = 1,\n fillOutput = 2\n} limitedOutput_directive;\n\n\n/*-************************************\n* Reading and writing into memory\n**************************************/\n\n/**\n * LZ4 relies on memcpy with a constant size being inlined. In freestanding\n * environments, the compiler can't assume the implementation of memcpy() is\n * standard compliant, so it can't apply its specialized memcpy() inlining\n * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze\n * memcpy() as if it were standard compliant, so it can inline it in freestanding\n * environments. 
This is needed when decompressing the Linux Kernel, for example.\n */\n#if !defined(LZ4_memcpy)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)\n# else\n# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)\n# endif\n#endif\n\n#if !defined(LZ4_memmove)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memmove __builtin_memmove\n# else\n# define LZ4_memmove memmove\n# endif\n#endif\n\nstatic unsigned LZ4_isLittleEndian(void)\n{\n const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */\n return one.c[0];\n}\n\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))\n#elif defined(_MSC_VER)\n#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))\n#endif\n\n#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)\n/* lie to the compiler about data alignment; use with caution */\n\nstatic U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\n\n#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\nLZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;\nLZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;\nLZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;\n\nstatic U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }\nstatic U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; 
}\nstatic reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }\n\n#else /* safe and portable access using memcpy() */\n\nstatic U16 LZ4_read16(const void* memPtr)\n{\n U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 LZ4_read32(const void* memPtr)\n{\n U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic reg_t LZ4_read_ARCH(const void* memPtr)\n{\n reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic void LZ4_write16(void* memPtr, U16 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\nstatic void LZ4_write32(void* memPtr, U32 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* LZ4_FORCE_MEMORY_ACCESS */\n\n\nstatic U16 LZ4_readLE16(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read16(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U16)((U16)p[0] | (p[1]<<8));\n }\n}\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\nstatic U32 LZ4_readLE32(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read32(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);\n }\n}\n#endif\n\nstatic void LZ4_writeLE16(void* memPtr, U16 value)\n{\n if (LZ4_isLittleEndian()) {\n LZ4_write16(memPtr, value);\n } else {\n BYTE* p = (BYTE*)memPtr;\n p[0] = (BYTE) value;\n p[1] = (BYTE)(value>>8);\n }\n}\n\n/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */\nLZ4_FORCE_INLINE\nvoid LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/\nLZ4_FORCE_INLINE void\nLZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH\n * - there is at least 12 bytes available to write after dstEnd */\nLZ4_FORCE_INLINE void\nLZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)\n{\n BYTE v[8];\n\n assert(dstEnd >= dstPtr + MINMATCH);\n\n switch(offset) {\n case 1:\n MEM_INIT(v, *srcPtr, 8);\n break;\n case 2:\n LZ4_memcpy(v, srcPtr, 2);\n LZ4_memcpy(&v[2], srcPtr, 2);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(push)\n# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */\n#endif\n LZ4_memcpy(&v[4], v, 4);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(pop)\n#endif\n break;\n case 4:\n LZ4_memcpy(v, srcPtr, 4);\n LZ4_memcpy(&v[4], srcPtr, 4);\n break;\n default:\n LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);\n return;\n }\n\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n while (dstPtr < dstEnd) {\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n }\n}\n#endif\n\n\n/*-************************************\n* Common functions\n**************************************/\nstatic unsigned LZ4_NbCommonBytes (reg_t val)\n{\n assert(val != 0);\n if (LZ4_isLittleEndian()) {\n if (sizeof(val) == 8) {\n# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)\n/*-*************************************************************************************************\n* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.\n* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics\n* including _tzcnt_u64. 
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.\n****************************************************************************************************/\n# if defined(__clang__) && (__clang_major__ < 10)\n /* Avoid undefined clang-cl intrinsics issue.\n * See https://github.com/lz4/lz4/pull/1017 for details. */\n return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;\n# else\n /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */\n return (unsigned)_tzcnt_u64(val) >> 3;\n# endif\n# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r = 0;\n _BitScanForward64(&r, (U64)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctzll((U64)val) >> 3;\n# else\n const U64 m = 0x0101010101010101ULL;\n val ^= val - 1;\n return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);\n# endif\n } else /* 32 bits */ {\n# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r;\n _BitScanForward(&r, (U32)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctz((U32)val) >> 3;\n# else\n const U32 m = 0x01010101;\n return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;\n# endif\n }\n } else /* Big Endian CPU */ {\n if (sizeof(val)==8) {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clzll((U64)val) >> 3;\n# else\n#if 1\n /* this method is probably faster,\n * but adds a 128 bytes lookup table */\n static const unsigned char ctz7_tab[128] = {\n 7, 0, 1, 0, 2, 
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n };\n U64 const mask = 0x0101010101010101ULL;\n U64 const t = (((val >> 8) - mask) | val) & mask;\n return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];\n#else\n /* this method doesn't consume memory space like the previous one,\n * but it contains several branches,\n * that may end up slowing execution */\n static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.\n Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.\n Note that this code path is never triggered in 32-bits mode. */\n unsigned r;\n if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }\n if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n r += (!val);\n return r;\n#endif\n# endif\n } else /* 32 bits */ {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clz((U32)val) >> 3;\n# else\n val >>= 8;\n val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |\n (val + 0x00FF0000)) >> 24;\n return (unsigned)val ^ 3;\n# endif\n }\n }\n}\n\n\n#define STEPSIZE sizeof(reg_t)\nLZ4_FORCE_INLINE\nunsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)\n{\n const BYTE* const pStart = pIn;\n\n if (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) {\n pIn+=STEPSIZE; pMatch+=STEPSIZE;\n } else {\n return LZ4_NbCommonBytes(diff);\n } }\n\n while (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) { 
pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }\n pIn += LZ4_NbCommonBytes(diff);\n return (unsigned)(pIn - pStart);\n }\n\n if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }\n if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }\n if ((pIn compression run slower on incompressible data */\n\n\n/*-************************************\n* Local Structures and types\n**************************************/\ntypedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;\n\n/**\n * This enum distinguishes several different modes of accessing previous\n * content in the stream.\n *\n * - noDict : There is no preceding content.\n * - withPrefix64k : Table entries up to ctx->dictSize before the current blob\n * blob being compressed are valid and refer to the preceding\n * content (of length ctx->dictSize), which is available\n * contiguously preceding in memory the content currently\n * being compressed.\n * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere\n * else in memory, starting at ctx->dictionary with length\n * ctx->dictSize.\n * - usingDictCtx : Everything concerning the preceding content is\n * in a separate context, pointed to by ctx->dictCtx.\n * ctx->dictionary, ctx->dictSize, and table entries\n * in the current context that refer to positions\n * preceding the beginning of the current compression are\n * ignored. 
Instead, ctx->dictCtx->dictionary and ctx->dictCtx\n * ->dictSize describe the location and size of the preceding\n * content, and matches are found by looking in the ctx\n * ->dictCtx->hashTable.\n */\ntypedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;\ntypedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;\n\n\n/*-************************************\n* Local Utils\n**************************************/\nint LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }\nconst char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }\nint LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }\nint LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }\n\n\n/*-****************************************\n* Internal Definitions, used only in Tests\n*******************************************/\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);\n\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize);\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize);\n#if defined (__cplusplus)\n}\n#endif\n\n/*-******************************\n* Compression functions\n********************************/\nLZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)\n{\n if (tableType == byU16)\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));\n else\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)\n{\n const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG+1 : LZ4_HASHLOG;\n if (LZ4_isLittleEndian()) {\n const U64 prime5bytes = 889523592379ULL;\n return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));\n } else {\n const U64 prime8bytes = 11400714785074694791ULL;\n return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));\n }\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)\n{\n if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\n return LZ4_hash4(LZ4_readLE32(p), tableType);\n#else\n return LZ4_hash4(LZ4_read32(p), tableType);\n#endif\n}\n\nLZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: { /* illegal! */ assert(0); return; }\n case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }\n }\n}\n\nLZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: /* fallthrough */\n case byPtr: { /* illegal! 
*/ assert(0); return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }\n }\n}\n\n/* LZ4_putPosition*() : only used in byPtr mode */\nLZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,\n void* tableBase, tableType_t const tableType)\n{\n const BYTE** const hashTable = (const BYTE**)tableBase;\n assert(tableType == byPtr); (void)tableType;\n hashTable[h] = p;\n}\n\nLZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n LZ4_putPositionOnHash(p, h, tableBase, tableType);\n}\n\n/* LZ4_getIndexOnHash() :\n * Index of match position registered in hash table.\n * hash position must be calculated by using base+index, or dictBase+index.\n * Assumption 1 : only valid if tableType == byU32 or byU16.\n * Assumption 2 : h is presumed valid (within limits of hash table)\n */\nLZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);\n if (tableType == byU32) {\n const U32* const hashTable = (const U32*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-2)));\n return hashTable[h];\n }\n if (tableType == byU16) {\n const U16* const hashTable = (const U16*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-1)));\n return hashTable[h];\n }\n assert(0); return 0; /* forbidden case */\n}\n\nstatic const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n assert(tableType == byPtr); (void)tableType;\n { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }\n}\n\nLZ4_FORCE_INLINE const BYTE*\nLZ4_getPosition(const BYTE* p,\n const void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n return LZ4_getPositionOnHash(h, tableBase, tableType);\n}\n\nLZ4_FORCE_INLINE 
void\nLZ4_prepareTable(LZ4_stream_t_internal* const cctx,\n const int inputSize,\n const tableType_t tableType) {\n /* If the table hasn't been used, it's guaranteed to be zeroed out, and is\n * therefore safe to use no matter what mode we're in. Otherwise, we figure\n * out if it's safe to leave as is or whether it needs to be reset.\n */\n if ((tableType_t)cctx->tableType != clearedTable) {\n assert(inputSize >= 0);\n if ((tableType_t)cctx->tableType != tableType\n || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)\n || ((tableType == byU32) && cctx->currentOffset > 1 GB)\n || tableType == byPtr\n || inputSize >= 4 KB)\n {\n DEBUGLOG(4, \"LZ4_prepareTable: Resetting table in %p\", cctx);\n MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);\n cctx->currentOffset = 0;\n cctx->tableType = (U32)clearedTable;\n } else {\n DEBUGLOG(4, \"LZ4_prepareTable: Re-use hash table (no reset)\");\n }\n }\n\n /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,\n * is faster than compressing without a gap.\n * However, compressing with currentOffset == 0 is faster still,\n * so we preserve that case.\n */\n if (cctx->currentOffset != 0 && tableType == byU32) {\n DEBUGLOG(5, \"LZ4_prepareTable: adding 64KB to currentOffset\");\n cctx->currentOffset += 64 KB;\n }\n\n /* Finally, clear history */\n cctx->dictCtx = NULL;\n cctx->dictionary = NULL;\n cctx->dictSize = 0;\n}\n\n/** LZ4_compress_generic_validated() :\n * inlined, to ensure branches are decided at compilation time.\n * The following conditions are presumed already validated:\n * - source != NULL\n * - inputSize > 0\n */\nLZ4_FORCE_INLINE int LZ4_compress_generic_validated(\n LZ4_stream_t_internal* const cctx,\n const char* const source,\n char* const dest,\n const int inputSize,\n int* inputConsumed, /* only written when outputDirective == fillOutput */\n const int maxOutputSize,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const 
dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n int result;\n const BYTE* ip = (const BYTE*)source;\n\n U32 const startIndex = cctx->currentOffset;\n const BYTE* base = (const BYTE*)source - startIndex;\n const BYTE* lowLimit;\n\n const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;\n const BYTE* const dictionary =\n dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;\n const U32 dictSize =\n dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;\n const U32 dictDelta =\n (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */\n\n int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);\n U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */\n const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;\n const BYTE* anchor = (const BYTE*) source;\n const BYTE* const iend = ip + inputSize;\n const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;\n const BYTE* const matchlimit = iend - LASTLITERALS;\n\n /* the dictCtx currentOffset is indexed on the start of the dictionary,\n * while a dictionary in the current context precedes the currentOffset */\n const BYTE* dictBase = (dictionary == NULL) ? NULL :\n (dictDirective == usingDictCtx) ?\n dictionary + dictSize - dictCtx->currentOffset :\n dictionary + dictSize - startIndex;\n\n BYTE* op = (BYTE*) dest;\n BYTE* const olimit = op + maxOutputSize;\n\n U32 offset = 0;\n U32 forwardH;\n\n DEBUGLOG(5, \"LZ4_compress_generic_validated: srcSize=%i, tableType=%u\", inputSize, tableType);\n assert(ip != NULL);\n if (tableType == byU16) assert(inputSize= 1);\n\n lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);\n\n /* Update context state */\n if (dictDirective == usingDictCtx) {\n /* Subsequent linked blocks can't use the dictionary. */\n /* Instead, they use the block we just compressed. */\n cctx->dictCtx = NULL;\n cctx->dictSize = (U32)inputSize;\n } else {\n cctx->dictSize += (U32)inputSize;\n }\n cctx->currentOffset += (U32)inputSize;\n cctx->tableType = (U32)tableType;\n\n if (inputSizehashTable, byPtr);\n } else {\n LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);\n } }\n ip++; forwardH = LZ4_hashPosition(ip, tableType);\n\n /* Main Loop */\n for ( ; ; ) {\n const BYTE* match;\n BYTE* token;\n const BYTE* filledIp;\n\n /* Find a match */\n if (tableType == byPtr) {\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);\n\n } while ( (match+LZ4_DISTANCE_MAX < ip)\n || (LZ4_read32(match) != LZ4_read32(ip)) );\n\n } else { /* byU32, byU16 */\n\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n U32 const current = (U32)(forwardIp - base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex <= current);\n assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n 
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n matchIndex += dictDelta; /* make dictCtx index comparable with current context */\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else if (dictDirective == usingExtDict) {\n if (matchIndex < startIndex) {\n DEBUGLOG(7, \"extDict candidate: matchIndex=%5u < startIndex=%5u\", matchIndex, startIndex);\n assert(startIndex - matchIndex >= MINMATCH);\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else { /* single continuous memory segment */\n match = base + matchIndex;\n }\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n\n DEBUGLOG(7, \"candidate at pos=%u (offset=%u \\n\", matchIndex, current - matchIndex);\n if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */\n assert(matchIndex < current);\n if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))\n && (matchIndex+LZ4_DISTANCE_MAX < current)) {\n continue;\n } /* too far */\n assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */\n\n if (LZ4_read32(match) == LZ4_read32(ip)) {\n if (maybe_extMem) offset = current - matchIndex;\n break; /* match found */\n }\n\n } while(1);\n }\n\n /* Catch up */\n filledIp = ip;\n assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */\n if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {\n do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));\n }\n\n /* Encode Literals */\n { unsigned const litLength = (unsigned)(ip - anchor);\n token = op++;\n if ((outputDirective == limitedOutput) && /* Check output buffer overflow */\n (unlikely(op + 
litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n if ((outputDirective == fillOutput) &&\n (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {\n op--;\n goto _last_literals;\n }\n if (litLength >= RUN_MASK) {\n unsigned len = litLength - RUN_MASK;\n *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255;\n *op++ = (BYTE)len;\n }\n else *token = (BYTE)(litLength< olimit)) {\n /* the match was too close to the end, rewind and go to last literals */\n op = token;\n goto _last_literals;\n }\n\n /* Encode Offset */\n if (maybe_extMem) { /* static test */\n DEBUGLOG(6, \" with offset=%u (ext if > %i)\", offset, (int)(ip - (const BYTE*)source));\n assert(offset <= LZ4_DISTANCE_MAX && offset > 0);\n LZ4_writeLE16(op, (U16)offset); op+=2;\n } else {\n DEBUGLOG(6, \" with offset=%u (same segment)\", (U32)(ip - match));\n assert(ip-match <= LZ4_DISTANCE_MAX);\n LZ4_writeLE16(op, (U16)(ip - match)); op+=2;\n }\n\n /* Encode MatchLength */\n { unsigned matchCode;\n\n if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)\n && (lowLimit==dictionary) /* match within extDict */ ) {\n const BYTE* limit = ip + (dictEnd-match);\n assert(dictEnd > match);\n if (limit > matchlimit) limit = matchlimit;\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);\n ip += (size_t)matchCode + MINMATCH;\n if (ip==limit) {\n unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);\n matchCode += more;\n ip += more;\n }\n DEBUGLOG(6, \" with matchLength=%u starting in extDict\", matchCode+MINMATCH);\n } else {\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);\n ip += (size_t)matchCode + MINMATCH;\n DEBUGLOG(6, \" with matchLength=%u\", matchCode+MINMATCH);\n }\n\n if 
((outputDirective) && /* Check output buffer overflow */\n (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {\n if (outputDirective == fillOutput) {\n /* Match description too long : reduce it */\n U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;\n ip -= matchCode - newMatchCode;\n assert(newMatchCode < matchCode);\n matchCode = newMatchCode;\n if (unlikely(ip <= filledIp)) {\n /* We have already filled up to filledIp so if ip ends up less than filledIp\n * we have positions in the hash table beyond the current position. This is\n * a problem if we reuse the hash table. So we have to remove these positions\n * from the hash table.\n */\n const BYTE* ptr;\n DEBUGLOG(5, \"Clearing %u positions\", (U32)(filledIp - ip));\n for (ptr = ip; ptr <= filledIp; ++ptr) {\n U32 const h = LZ4_hashPosition(ptr, tableType);\n LZ4_clearHash(h, cctx->hashTable, tableType);\n }\n }\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n }\n if (matchCode >= ML_MASK) {\n *token += ML_MASK;\n matchCode -= ML_MASK;\n LZ4_write32(op, 0xFFFFFFFF);\n while (matchCode >= 4*255) {\n op+=4;\n LZ4_write32(op, 0xFFFFFFFF);\n matchCode -= 4*255;\n }\n op += matchCode / 255;\n *op++ = (BYTE)(matchCode % 255);\n } else\n *token += (BYTE)(matchCode);\n }\n /* Ensure we have enough space for the last literals. 
*/\n assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));\n\n anchor = ip;\n\n /* Test end of chunk */\n if (ip >= mflimitPlusOne) break;\n\n /* Fill table */\n { U32 const h = LZ4_hashPosition(ip-2, tableType);\n if (tableType == byPtr) {\n LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);\n } else {\n U32 const idx = (U32)((ip-2) - base);\n LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);\n } }\n\n /* Test next position */\n if (tableType == byPtr) {\n\n match = LZ4_getPosition(ip, cctx->hashTable, tableType);\n LZ4_putPosition(ip, cctx->hashTable, tableType);\n if ( (match+LZ4_DISTANCE_MAX >= ip)\n && (LZ4_read32(match) == LZ4_read32(ip)) )\n { token=op++; *token=0; goto _next_match; }\n\n } else { /* byU32, byU16 */\n\n U32 const h = LZ4_hashPosition(ip, tableType);\n U32 const current = (U32)(ip-base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n matchIndex += dictDelta;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else if (dictDirective==usingExtDict) {\n if (matchIndex < startIndex) {\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else { /* single memory segment */\n match = base + matchIndex;\n }\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if ( ((dictIssue==dictSmall) ? 
(matchIndex >= prefixIdxLimit) : 1)\n && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))\n && (LZ4_read32(match) == LZ4_read32(ip)) ) {\n token=op++;\n *token=0;\n if (maybe_extMem) offset = current - matchIndex;\n DEBUGLOG(6, \"seq.start:%i, literals=%u, match.start:%i\",\n (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));\n goto _next_match;\n }\n }\n\n /* Prepare next loop */\n forwardH = LZ4_hashPosition(++ip, tableType);\n\n }\n\n_last_literals:\n /* Encode Last Literals */\n { size_t lastRun = (size_t)(iend - anchor);\n if ( (outputDirective) && /* Check output buffer overflow */\n (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {\n if (outputDirective == fillOutput) {\n /* adapt lastRun to fill 'dst' */\n assert(olimit >= op);\n lastRun = (size_t)(olimit-op) - 1/*token*/;\n lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */\n }\n }\n DEBUGLOG(6, \"Final literal run : %i literals\", (int)lastRun);\n if (lastRun >= RUN_MASK) {\n size_t accumulator = lastRun - RUN_MASK;\n *op++ = RUN_MASK << ML_BITS;\n for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;\n *op++ = (BYTE) accumulator;\n } else {\n *op++ = (BYTE)(lastRun< 0);\n DEBUGLOG(5, \"LZ4_compress_generic: compressed %i bytes into %i bytes\", inputSize, result);\n return result;\n}\n\n/** LZ4_compress_generic() :\n * inlined, to ensure branches are decided at compilation time;\n * takes care of src == (NULL, 0)\n * and forward the rest to LZ4_compress_generic_validated */\nLZ4_FORCE_INLINE int LZ4_compress_generic(\n LZ4_stream_t_internal* const cctx,\n const char* const src,\n char* const dst,\n const int srcSize,\n int *inputConsumed, /* only written when outputDirective == fillOutput */\n const int dstCapacity,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n DEBUGLOG(5, \"LZ4_compress_generic: srcSize=%i, dstCapacity=%i\",\n srcSize, dstCapacity);\n\n if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */\n if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */\n if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */\n DEBUGLOG(5, \"Generating an empty block\");\n assert(outputDirective == notLimited || dstCapacity >= 1);\n assert(dst != NULL);\n dst[0] = 0;\n if (outputDirective == fillOutput) {\n assert (inputConsumed != NULL);\n *inputConsumed = 0;\n }\n return 1;\n }\n assert(src != NULL);\n\n return LZ4_compress_generic_validated(cctx, src, dst, srcSize,\n inputConsumed, /* only written into if outputDirective == fillOutput */\n dstCapacity, outputDirective,\n tableType, dictDirective, dictIssue, 
acceleration);\n}\n\n\nint LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;\n assert(ctx != NULL);\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n if (maxOutputSize >= LZ4_compressBound(inputSize)) {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n/**\n * LZ4_compress_fast_extState_fastReset() :\n * A variant of LZ4_compress_fast_extState().\n *\n * Using this variant avoids an expensive initialization step. 
It is only safe\n * to call if the state buffer is known to be correctly initialized already\n * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of\n * \"correctly initialized\").\n */\nint LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n assert(ctx != NULL);\n\n if (dstCapacity >= LZ4_compressBound(srcSize)) {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n\nint LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)\n{\n int result;\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctxPtr == NULL) return 0;\n#else\n LZ4_stream_t ctx;\n LZ4_stream_t* const ctxPtr = &ctx;\n#endif\n result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctxPtr);\n#endif\n return result;\n}\n\n\nint LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);\n}\n\n\n/* Note!: This function leaves the stream in an unclean/broken state!\n * It is not safe to subsequently use the same state with a _fastReset() or\n * _continue() call without resetting it. */\nstatic int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n void* const s = LZ4_initStream(state, sizeof (*state));\n assert(s != NULL); (void)s;\n\n if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */\n return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);\n } else {\n if (*srcSizePtr < LZ4_64Klimit) {\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);\n } }\n}\n\nint LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);\n /* clean the state on exit */\n LZ4_initStream(state, sizeof (LZ4_stream_t));\n return r;\n}\n\n\nint LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)\n{\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctx == NULL) return 0;\n#else\n LZ4_stream_t ctxBody;\n LZ4_stream_t* const ctx = &ctxBody;\n#endif\n\n int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctx);\n#endif\n return result;\n}\n\n\n\n/*-******************************\n* Streaming functions\n********************************/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_stream_t* LZ4_createStream(void)\n{\n LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));\n LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));\n DEBUGLOG(4, \"LZ4_createStream %p\", lz4s);\n if (lz4s == NULL) return NULL;\n LZ4_initStream(lz4s, sizeof(*lz4s));\n return lz4s;\n}\n#endif\n\nstatic size_t LZ4_stream_t_alignment(void)\n{\n#if LZ4_ALIGN_TEST\n typedef struct { char c; LZ4_stream_t t; } t_a;\n return sizeof(t_a) - sizeof(LZ4_stream_t);\n#else\n return 1; /* effectively disabled */\n#endif\n}\n\nLZ4_stream_t* LZ4_initStream (void* buffer, size_t size)\n{\n DEBUGLOG(5, \"LZ4_initStream\");\n if (buffer == NULL) { return NULL; }\n if (size < sizeof(LZ4_stream_t)) { return NULL; }\n if (!LZ4_isAligned(buffer, 
LZ4_stream_t_alignment())) return NULL;\n MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));\n return (LZ4_stream_t*)buffer;\n}\n\n/* resetStream is now deprecated,\n * prefer initStream() which is more general */\nvoid LZ4_resetStream (LZ4_stream_t* LZ4_stream)\n{\n DEBUGLOG(5, \"LZ4_resetStream (ctx:%p)\", LZ4_stream);\n MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));\n}\n\nvoid LZ4_resetStream_fast(LZ4_stream_t* ctx) {\n LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nint LZ4_freeStream (LZ4_stream_t* LZ4_stream)\n{\n if (!LZ4_stream) return 0; /* support free on NULL */\n DEBUGLOG(5, \"LZ4_freeStream %p\", LZ4_stream);\n FREEMEM(LZ4_stream);\n return (0);\n}\n#endif\n\n\ntypedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;\n#define HASH_UNIT sizeof(reg_t)\nint LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,\n const char* dictionary, int dictSize,\n LoadDict_mode_e _ld)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n const tableType_t tableType = byU32;\n const BYTE* p = (const BYTE*)dictionary;\n const BYTE* const dictEnd = p + dictSize;\n U32 idx32;\n\n DEBUGLOG(4, \"LZ4_loadDict (%i bytes from %p into %p)\", dictSize, dictionary, LZ4_dict);\n\n /* It's necessary to reset the context,\n * and not just continue it with prepareTable()\n * to avoid any risk of generating overflowing matchIndex\n * when compressing using this dictionary */\n LZ4_resetStream(LZ4_dict);\n\n /* We always increment the offset by 64 KB, since, if the dict is longer,\n * we truncate it to the last 64k, and if it's shorter, we still want to\n * advance by a whole window length so we can provide the guarantee that\n * there are only valid offsets in the window, which allows an optimization\n * in LZ4_compress_fast_continue() where it uses noDictIssue even when the\n * dictionary isn't a full 64k. 
*/\n dict->currentOffset += 64 KB;\n\n if (dictSize < (int)HASH_UNIT) {\n return 0;\n }\n\n if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;\n dict->dictionary = p;\n dict->dictSize = (U32)(dictEnd - p);\n dict->tableType = (U32)tableType;\n idx32 = dict->currentOffset - dict->dictSize;\n\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n /* Note: overwriting => favors positions end of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n p+=3; idx32+=3;\n }\n\n if (_ld == _ld_slow) {\n /* Fill hash table with additional references, to improve compression capability */\n p = dict->dictionary;\n idx32 = dict->currentOffset - dict->dictSize;\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n U32 const limit = dict->currentOffset - 64 KB;\n if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {\n /* Note: not overwriting => favors positions beginning of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n }\n p++; idx32++;\n }\n }\n\n return (int)dict->dictSize;\n}\n\nint LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);\n}\n\nint LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);\n}\n\nvoid LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)\n{\n const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :\n &(dictionaryStream->internal_donotuse);\n\n DEBUGLOG(4, \"LZ4_attach_dictionary (%p, %p, size %u)\",\n workingStream, dictionaryStream,\n dictCtx != NULL ? dictCtx->dictSize : 0);\n\n if (dictCtx != NULL) {\n /* If the current offset is zero, we will never look in the\n * external dictionary context, since there is no value a table\n * entry can take that indicate a miss. 
In that case, we need\n * to bump the offset to something non-zero.\n */\n if (workingStream->internal_donotuse.currentOffset == 0) {\n workingStream->internal_donotuse.currentOffset = 64 KB;\n }\n\n /* Don't actually attach an empty dictionary.\n */\n if (dictCtx->dictSize == 0) {\n dictCtx = NULL;\n }\n }\n workingStream->internal_donotuse.dictCtx = dictCtx;\n}\n\n\nstatic void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)\n{\n assert(nextSize >= 0);\n if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */\n /* rescale hash table */\n U32 const delta = LZ4_dict->currentOffset - 64 KB;\n const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;\n int i;\n DEBUGLOG(4, \"LZ4_renormDictT\");\n for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0;\n else LZ4_dict->hashTable[i] -= delta;\n }\n LZ4_dict->currentOffset = 64 KB;\n if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;\n LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;\n }\n}\n\n\nint LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,\n const char* source, char* dest,\n int inputSize, int maxOutputSize,\n int acceleration)\n{\n const tableType_t tableType = byU32;\n LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;\n const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;\n\n DEBUGLOG(5, \"LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)\", inputSize, streamPtr->dictSize);\n\n LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n\n /* invalidate tiny dictionaries */\n if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */\n && (dictEnd != source) /* prefix mode */\n && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */\n && (streamPtr->dictCtx == NULL) /* usingDictCtx */\n ) {\n DEBUGLOG(5, \"LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small\", streamPtr->dictSize, streamPtr->dictionary);\n /* remove dictionary existence from history, to employ faster prefix mode */\n streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)source;\n dictEnd = source;\n }\n\n /* Check overlapping input/dictionary space */\n { const char* const sourceEnd = source + inputSize;\n if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {\n streamPtr->dictSize = (U32)(dictEnd - sourceEnd);\n if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;\n if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;\n }\n }\n\n /* prefix mode : source data follows dictionary */\n if (dictEnd == source) {\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);\n else\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);\n }\n\n /* external dictionary mode */\n { int 
result;\n if (streamPtr->dictCtx) {\n /* We depend here on the fact that dictCtx'es (produced by\n * LZ4_loadDict) guarantee that their tables contain no references\n * to offsets between dictCtx->currentOffset - 64 KB and\n * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe\n * to use noDictIssue even when the dict isn't a full 64 KB.\n */\n if (inputSize > 4 KB) {\n /* For compressing large blobs, it is faster to pay the setup\n * cost to copy the dictionary's tables into the active context,\n * so that the compression loop is only looking into one table.\n */\n LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);\n }\n } else { /* small data <= 4 KB */\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n }\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)inputSize;\n return result;\n }\n}\n\n\n/* Hidden debug function, to force-test external dictionary mode */\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n\n LZ4_renormDictT(streamPtr, srcSize);\n\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, 
dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n\n return result;\n}\n\n\n/*! LZ4_saveDict() :\n * If previously compressed data block is not guaranteed to remain available at its memory location,\n * save it into a safer place (char* safeBuffer).\n * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,\n * one can therefore call LZ4_compress_fast_continue() right after.\n * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.\n */\nint LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n\n DEBUGLOG(5, \"LZ4_saveDict : dictSize=%i, safeBuffer=%p\", dictSize, safeBuffer);\n\n if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */\n if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }\n\n if (safeBuffer == NULL) assert(dictSize == 0);\n if (dictSize > 0) {\n const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;\n assert(dict->dictionary);\n LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);\n }\n\n dict->dictionary = (const BYTE*)safeBuffer;\n dict->dictSize = (U32)dictSize;\n\n return dictSize;\n}\n\n\n\n/*-*******************************\n * Decompression functions\n ********************************/\n\ntypedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;\n\n#undef MIN\n#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )\n\n\n/* variant for decompress_unsafe()\n * does not know end of input\n * presumes input is well formed\n * note : will consume at least one byte */\nstatic size_t read_long_length_no_check(const BYTE** pp)\n{\n size_t b, l = 0;\n do { b = **pp; (*pp)++; l += b; } while (b==255);\n DEBUGLOG(6, \"read_long_length_no_check: +length=%zu using %zu input bytes\", l, l/255 + 1)\n return l;\n}\n\n/* core decoder variant for LZ4_decompress_fast*()\n * for legacy support only : these entry points are deprecated.\n * - Presumes input is correctly formed (no defense vs malformed inputs)\n * - Does not know input size (presume input buffer is \"large enough\")\n * - Decompress a full block (only)\n * @return : nb of bytes read from input.\n * Note : this variant is not optimized for speed, just for maintenance.\n * the goal is to remove support of decompress_fast*() variants by v2.0\n**/\nLZ4_FORCE_INLINE int\nLZ4_decompress_unsafe_generic(\n const BYTE* const istart,\n BYTE* const ostart,\n int decompressedSize,\n\n size_t prefixSize,\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note: =0 if dictStart==NULL */\n )\n{\n const BYTE* ip = istart;\n BYTE* op = (BYTE*)ostart;\n BYTE* const oend = ostart + decompressedSize;\n const BYTE* const prefixStart = ostart - prefixSize;\n\n DEBUGLOG(5, \"LZ4_decompress_unsafe_generic\");\n if (dictStart == NULL) assert(dictSize == 0);\n\n while (1) {\n /* start new sequence */\n unsigned token = *ip++;\n\n /* literals */\n { size_t ll = token >> ML_BITS;\n if (ll==15) {\n /* long literal length */\n ll += read_long_length_no_check(&ip);\n }\n if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */\n LZ4_memmove(op, ip, ll); /* support in-place decompression */\n op += ll;\n ip += ll;\n if ((size_t)(oend-op) < MFLIMIT) {\n if (op==oend) break; /* end of block */\n DEBUGLOG(5, \"invalid: literals end at distance %zi from end of block\", oend-op);\n /* incorrect end of block 
:\n * last match must start at least MFLIMIT==12 bytes before end of output block */\n return -1;\n } }\n\n /* match */\n { size_t ml = token & 15;\n size_t const offset = LZ4_readLE16(ip);\n ip+=2;\n\n if (ml==15) {\n /* long literal length */\n ml += read_long_length_no_check(&ip);\n }\n ml += MINMATCH;\n\n if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */\n\n { const BYTE* match = op - offset;\n\n /* out of range */\n if (offset > (size_t)(op - prefixStart) + dictSize) {\n DEBUGLOG(6, \"offset out of range\");\n return -1;\n }\n\n /* check special case : extDict */\n if (offset > (size_t)(op - prefixStart)) {\n /* extDict scenario */\n const BYTE* const dictEnd = dictStart + dictSize;\n const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));\n size_t const extml = (size_t)(dictEnd - extMatch);\n if (extml > ml) {\n /* match entirely within extDict */\n LZ4_memmove(op, extMatch, ml);\n op += ml;\n ml = 0;\n } else {\n /* match split between extDict & prefix */\n LZ4_memmove(op, extMatch, extml);\n op += extml;\n ml -= extml;\n }\n match = prefixStart;\n }\n\n /* match copy - slow variant, supporting overlap copy */\n { size_t u;\n for (u=0; u= ipmax before start of loop. Returns initial_error if so.\n * @error (output) - error code. 
Must be set to 0 before call.\n**/\ntypedef size_t Rvl_t;\nstatic const Rvl_t rvl_error = (Rvl_t)(-1);\nLZ4_FORCE_INLINE Rvl_t\nread_variable_length(const BYTE** ip, const BYTE* ilimit,\n int initial_check)\n{\n Rvl_t s, length = 0;\n assert(ip != NULL);\n assert(*ip != NULL);\n assert(ilimit != NULL);\n if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */\n return rvl_error;\n }\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n if (likely(s != 255)) return length;\n do {\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n } while (s == 255);\n\n return length;\n}\n\n/*! 
LZ4_decompress_generic() :\n * This generic decompression function covers all use cases.\n * It shall be instantiated several times, using different sets of directives.\n * Note that it is important for performance that this function really get inlined,\n * in order to remove useless branches during compilation optimization.\n */\nLZ4_FORCE_INLINE int\nLZ4_decompress_generic(\n const char* const src,\n char* const dst,\n int srcSize,\n int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */\n\n earlyEnd_directive partialDecoding, /* full, partial */\n dict_directive dict, /* noDict, withPrefix64k, usingExtDict */\n const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note : = 0 if noDict */\n )\n{\n if ((src == NULL) || (outputSize < 0)) { return -1; }\n\n { const BYTE* ip = (const BYTE*) src;\n const BYTE* const iend = ip + srcSize;\n\n BYTE* op = (BYTE*) dst;\n BYTE* const oend = op + outputSize;\n BYTE* cpy;\n\n const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;\n\n const int checkOffset = (dictSize < (int)(64 KB));\n\n\n /* Set up the \"end\" pointers for the shortcut. */\n const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;\n const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;\n\n const BYTE* match;\n size_t offset;\n unsigned token;\n size_t length;\n\n\n DEBUGLOG(5, \"LZ4_decompress_generic (srcSize:%i, dstSize:%i)\", srcSize, outputSize);\n\n /* Special cases */\n assert(lowPrefix <= op);\n if (unlikely(outputSize==0)) {\n /* Empty output buffer */\n if (partialDecoding) return 0;\n return ((srcSize==1) && (*ip==0)) ? 
0 : -1;\n }\n if (unlikely(srcSize==0)) { return -1; }\n\n /* LZ4_FAST_DEC_LOOP:\n * designed for modern OoO performance cpus,\n * where copying reliably 32-bytes is preferable to an unpredictable branch.\n * note : fast loop may show a regression for some client arm chips. */\n#if LZ4_FAST_DEC_LOOP\n if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(6, \"move to safe decode loop\");\n goto safe_decode;\n }\n\n /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using fast decode loop\");\n while (1) {\n /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */\n assert(oend - op >= FASTLOOP_SAFE_DISTANCE);\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) {\n DEBUGLOG(6, \"error reading long literal length\");\n goto _output_error;\n }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n\n /* copy literals */\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }\n LZ4_wildCopy32(op, ip, op+length);\n ip += length; op += length;\n } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {\n /* We don't need to check oend, since we check it once for each loop below */\n DEBUGLOG(7, \"copy %u bytes in a 16-bytes stripe\", (unsigned)length);\n /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */\n LZ4_memcpy(op, ip, 16);\n ip += length; op += length;\n } else {\n goto safe_literal_copy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n DEBUGLOG(6, \"blockPos%6u: offset = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)offset);\n match = op - offset;\n assert(match <= op); /* overflow check */\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \" match length token = %u (len==%u)\", (unsigned)length, (unsigned)length+MINMATCH);\n\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) {\n DEBUGLOG(5, \"error reading long match length\");\n goto _output_error;\n }\n length += addl;\n length += MINMATCH;\n DEBUGLOG(7, \" long match length == %u\", (unsigned)length);\n if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n goto safe_match_copy;\n }\n } else {\n length += MINMATCH;\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(7, \"moving to safe_match_copy (ml==%u)\", (unsigned)length);\n goto safe_match_copy;\n }\n\n /* Fastpath check: skip LZ4_wildCopy32 when true */\n if ((dict == withPrefix64k) || (match >= lowPrefix)) {\n if (offset >= 8) {\n assert(match >= lowPrefix);\n assert(match <= op);\n assert(op + 18 <= oend);\n\n LZ4_memcpy(op, match, 8);\n LZ4_memcpy(op+8, match+8, 8);\n LZ4_memcpy(op+16, match+16, 2);\n op += length;\n continue;\n } } }\n\n if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {\n DEBUGLOG(5, \"Error : pos=%zi, offset=%zi => outside buffers\", op-lowPrefix, op-match);\n goto _output_error;\n }\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) {\n DEBUGLOG(7, \"partialDecoding: dictionary match, close to dstEnd\");\n length = MIN(length, (size_t)(oend-op));\n } else {\n DEBUGLOG(6, \"end-of-block condition violated\")\n goto _output_error;\n } }\n\n if (length <= 
(size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) { *op++ = *copyFrom++; }\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n\n /* copy match within block */\n cpy = op + length;\n\n assert((op <= oend) && (oend-op >= 32));\n if (unlikely(offset<16)) {\n LZ4_memcpy_using_offset(op, match, cpy, offset);\n } else {\n LZ4_wildCopy32(op, match, cpy);\n }\n\n op = cpy; /* wildcopy correction */\n }\n safe_decode:\n#endif\n\n /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using safe decode loop\");\n while (1) {\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* A two-stage shortcut for the most common case:\n * 1) If the literal length is 0..14, and there is enough space,\n * enter the shortcut and copy 16 bytes on behalf of the literals\n * (in the fast mode, only 8 bytes can be safely copied this way).\n * 2) Further if the match length is 4..18, copy 18 bytes in a similar\n * manner; but we ensure that there's enough space in the output for\n * those 18 bytes earlier, upon entering the shortcut (in other words,\n * there is a combined check for both stages).\n */\n if ( (length != RUN_MASK)\n /* strictly \"less than\" on input, to re-enter the loop with at least one byte */\n && likely((ip < shortiend) & (op <= 
shortoend)) ) {\n /* Copy the literals */\n LZ4_memcpy(op, ip, 16);\n op += length; ip += length;\n\n /* The second stage: prepare for match copying, decode full info.\n * If it doesn't work out, the info won't be wasted. */\n length = token & ML_MASK; /* match length */\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u (len=%u)\", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);\n offset = LZ4_readLE16(ip); ip += 2;\n match = op - offset;\n assert(match <= op); /* check overflow */\n\n /* Do not deal with overlapping matches. */\n if ( (length != ML_MASK)\n && (offset >= 8)\n && (dict==withPrefix64k || match >= lowPrefix) ) {\n /* Copy the match. */\n LZ4_memcpy(op + 0, match + 0, 8);\n LZ4_memcpy(op + 8, match + 8, 8);\n LZ4_memcpy(op +16, match +16, 2);\n op += length + MINMATCH;\n /* Both stages worked, load the next token. */\n continue;\n }\n\n /* The second stage didn't work out, but the info is ready.\n * Propel it right to the point of match copying. */\n goto _copy_match;\n }\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n }\n\n#if LZ4_FAST_DEC_LOOP\n safe_literal_copy:\n#endif\n /* copy literals */\n cpy = op+length;\n\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {\n /* We've either hit the input parsing restriction or the output parsing restriction.\n * In the normal scenario, decoding a full block, it must be the last sequence,\n * otherwise it's an error (invalid input or dimensions).\n * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.\n */\n if (partialDecoding) {\n /* Since we are 
partial decoding we may be in this block because of the output parsing\n * restriction, which is not valid since the output buffer is allowed to be undersized.\n */\n DEBUGLOG(7, \"partialDecoding: copying literals, close to input or output end\")\n DEBUGLOG(7, \"partialDecoding: literal length = %u\", (unsigned)length);\n DEBUGLOG(7, \"partialDecoding: remaining space in dstBuffer : %i\", (int)(oend - op));\n DEBUGLOG(7, \"partialDecoding: remaining space in srcBuffer : %i\", (int)(iend - ip));\n /* Finishing in the middle of a literals segment,\n * due to lack of input.\n */\n if (ip+length > iend) {\n length = (size_t)(iend-ip);\n cpy = op + length;\n }\n /* Finishing in the middle of a literals segment,\n * due to lack of output space.\n */\n if (cpy > oend) {\n cpy = oend;\n assert(op<=oend);\n length = (size_t)(oend-op);\n }\n } else {\n /* We must be on the last sequence (or invalid) because of the parsing limitations\n * so check that we exactly consume the input and don't overrun the output buffer.\n */\n if ((ip+length != iend) || (cpy > oend)) {\n DEBUGLOG(5, \"should have been last run of literals\")\n DEBUGLOG(5, \"ip(%p) + length(%i) = %p != iend (%p)\", ip, (int)length, ip+length, iend);\n DEBUGLOG(5, \"or cpy(%p) > (oend-MFLIMIT)(%p)\", cpy, oend-MFLIMIT);\n DEBUGLOG(5, \"after writing %u bytes / %i bytes available\", (unsigned)(op-(BYTE*)dst), outputSize);\n goto _output_error;\n }\n }\n LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */\n ip += length;\n op += length;\n /* Necessarily EOF when !partialDecoding.\n * When partialDecoding, it is EOF if we've either\n * filled the output buffer or\n * can't proceed with reading an offset for following match.\n */\n if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {\n break;\n }\n } else {\n LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */\n ip += length; op = cpy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n match = op - offset;\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n _copy_match:\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */\n }\n length += MINMATCH;\n\n#if LZ4_FAST_DEC_LOOP\n safe_match_copy:\n#endif\n if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) length = MIN(length, (size_t)(oend-op));\n else goto _output_error; /* doesn't respect parsing restriction */\n }\n\n if (length <= (size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) *op++ = *copyFrom++;\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n assert(match >= lowPrefix);\n\n /* copy match within block */\n cpy = op + length;\n\n /* partialDecoding : may end anywhere within the block */\n assert(op<=oend);\n if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n size_t const mlen = MIN(length, 
(size_t)(oend-op));\n const BYTE* const matchEnd = match + mlen;\n BYTE* const copyEnd = op + mlen;\n if (matchEnd > op) { /* overlap copy */\n while (op < copyEnd) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, mlen);\n }\n op = copyEnd;\n if (op == oend) { break; }\n continue;\n }\n\n if (unlikely(offset<8)) {\n LZ4_write32(op, 0); /* silence msan warning when offset==0 */\n op[0] = match[0];\n op[1] = match[1];\n op[2] = match[2];\n op[3] = match[3];\n match += inc32table[offset];\n LZ4_memcpy(op+4, match, 4);\n match -= dec64table[offset];\n } else {\n LZ4_memcpy(op, match, 8);\n match += 8;\n }\n op += 8;\n\n if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);\n if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */\n if (op < oCopyLimit) {\n LZ4_wildCopy8(op, match, oCopyLimit);\n match += oCopyLimit - op;\n op = oCopyLimit;\n }\n while (op < cpy) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, 8);\n if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }\n }\n op = cpy; /* wildcopy correction */\n }\n\n /* end of decoding */\n DEBUGLOG(5, \"decoded %i bytes\", (int) (((char*)op)-dst));\n return (int) (((char*)op)-dst); /* Nb of output bytes decoded */\n\n /* Overflow error detected */\n _output_error:\n return (int) (-(((const char*)ip)-src))-1;\n }\n}\n\n\n/*===== Instantiate the API decoding functions. 
=====*/\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,\n decode_full_block, noDict,\n (BYTE*)dest, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,\n partial_decode,\n noDict, (BYTE*)dst, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_fast(const char* source, char* dest, int originalSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_fast\");\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, NULL, 0);\n}\n\n/*===== Instantiate a few more decoding cases, used more than once. =====*/\n\nLZ4_FORCE_O2 /* Exported, an obsolete API function. */\nint LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\n/* Another obsolete API function, paired with the previous one. 
*/\nint LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,\n size_t prefixSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_safe_forceExtDict\");\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,\n const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, (const BYTE*)dictStart, dictSize);\n}\n\n/* The \"double 
dictionary\" mode, for use with e.g. ring buffers: the first part\n * of the dictionary is passed as prefix, and the second via dictStart + dictSize.\n * These routines are used only once, in LZ4_decompress_*_continue().\n */\nLZ4_FORCE_INLINE\nint LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize, const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);\n}\n\n/*===== streaming decompression functions =====*/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_streamDecode_t* LZ4_createStreamDecode(void)\n{\n LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));\n return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));\n}\n\nint LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)\n{\n if (LZ4_stream == NULL) { return 0; } /* support free on NULL */\n FREEMEM(LZ4_stream);\n return 0;\n}\n#endif\n\n/*! LZ4_setStreamDecode() :\n * Use this function to instruct where to find the dictionary.\n * This function is not necessary if previous data is still available where it was decoded.\n * Loading a size of 0 is allowed (same effect as no dictionary).\n * @return : 1 if OK, 0 if error\n */\nint LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n lz4sd->prefixSize = (size_t)dictSize;\n if (dictSize) {\n assert(dictionary != NULL);\n lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;\n } else {\n lz4sd->prefixEnd = (const BYTE*) dictionary;\n }\n lz4sd->externalDict = NULL;\n lz4sd->extDictSize = 0;\n return 1;\n}\n\n/*! 
LZ4_decoderRingBufferSize() :\n * when setting a ring buffer for streaming decompression (optional scenario),\n * provides the minimum size of this ring buffer\n * to be compatible with any source respecting maxBlockSize condition.\n * Note : in a ring buffer scenario,\n * blocks are presumed decompressed next to each other.\n * When not enough space remains for next block (remainingSize < maxBlockSize),\n * decoding resumes from beginning of ring buffer.\n * @return : minimum ring buffer size,\n * or 0 if there is an error (invalid maxBlockSize).\n */\nint LZ4_decoderRingBufferSize(int maxBlockSize)\n{\n if (maxBlockSize < 0) return 0;\n if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;\n if (maxBlockSize < 16) maxBlockSize = 16;\n return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);\n}\n\n/*\n*_continue() :\n These decoding functions allow decompression of multiple blocks in \"streaming\" mode.\n Previously decoded blocks must still be available at the memory position where they were decoded.\n If it's not possible, save the relevant part of decoded data into a safe buffer,\n and indicate where it stands using LZ4_setStreamDecode()\n*/\nLZ4_FORCE_O2\nint LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n int result;\n\n if (lz4sd->prefixSize == 0) {\n /* The first call, no dictionary yet. */\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n /* They're rolling the current segment. 
*/\n if (lz4sd->prefixSize >= 64 KB - 1)\n result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n else if (lz4sd->extDictSize == 0)\n result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize);\n else\n result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize += (size_t)result;\n lz4sd->prefixEnd += result;\n } else {\n /* The buffer wraps around, or they're switching to another buffer. */\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n }\n\n return result;\n}\n\nLZ4_FORCE_O2 int\nLZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,\n const char* source, char* dest, int originalSize)\n{\n LZ4_streamDecode_t_internal* const lz4sd =\n (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);\n int result;\n\n DEBUGLOG(5, \"LZ4_decompress_fast_continue (toDecodeSize=%i)\", originalSize);\n assert(originalSize >= 0);\n\n if (lz4sd->prefixSize == 0) {\n DEBUGLOG(5, \"first invocation : no prefix nor extDict\");\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_fast(source, dest, originalSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n DEBUGLOG(5, \"continue using existing prefix\");\n result = LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n lz4sd->prefixSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return 
result;\n lz4sd->prefixSize += (size_t)originalSize;\n lz4sd->prefixEnd += originalSize;\n } else {\n DEBUGLOG(5, \"prefix becomes extDict\");\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_fast_extDict(source, dest, originalSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n }\n\n return result;\n}\n\n\n/*\nAdvanced decoding functions :\n*_usingDict() :\n These decoding functions work the same as \"_continue\" ones,\n the dictionary must be explicitly provided within parameters\n*/\n\nint LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, 
dstCapacity, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0 || dictStart+dictSize == dest)\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n (size_t)dictSize, NULL, 0);\n assert(dictSize >= 0);\n return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);\n}\n\n\n/*=*************************************************\n* Obsolete Functions\n***************************************************/\n/* obsolete compression functions */\nint LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)\n{\n return LZ4_compress_default(source, dest, inputSize, maxOutputSize);\n}\nint LZ4_compress(const char* src, char* dest, int srcSize)\n{\n return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));\n}\nint LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);\n}\nint LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);\n}\nint LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);\n}\nint LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)\n{\n return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);\n}\n\n/*\nThese decompression functions are deprecated and should no longer be 
used.\nThey are only provided here for compatibility with older user programs.\n- LZ4_uncompress is totally equivalent to LZ4_decompress_fast\n- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe\n*/\nint LZ4_uncompress (const char* source, char* dest, int outputSize)\n{\n return LZ4_decompress_fast(source, dest, outputSize);\n}\nint LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)\n{\n return LZ4_decompress_safe(source, dest, isize, maxOutputSize);\n}\n\n/* Obsolete Streaming functions */\n\nint LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }\n\nint LZ4_resetStreamState(void* state, char* inputBuffer)\n{\n (void)inputBuffer;\n LZ4_resetStream((LZ4_stream_t*)state);\n return 0;\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nvoid* LZ4_create (char* inputBuffer)\n{\n (void)inputBuffer;\n return LZ4_createStream();\n}\n#endif\n\nchar* LZ4_slideInputBuffer (void* state)\n{\n /* avoid const char * -> char * conversion warning */\n return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;\n}\n\n#endif /* LZ4_COMMONDEFS_ONLY */\n"], ["/pogocache/src/hashmap.c", "// Copyright 2020 Joshua J Baker. All rights reserved.\n// Use of this source code is governed by an MIT-style\n// license that can be found in the LICENSE file.\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\n#define GROW_AT 0.60 /* 60% */\n#define SHRINK_AT 0.10 /* 10% */\n\n#ifndef HASHMAP_LOAD_FACTOR\n#define HASHMAP_LOAD_FACTOR GROW_AT\n#endif\n\nstatic void *(*__malloc)(size_t) = NULL;\nstatic void *(*__realloc)(void *, size_t) = NULL;\nstatic void (*__free)(void *) = NULL;\n\n// hashmap_set_allocator allows for configuring a custom allocator for\n// all hashmap library operations. 
This function, if needed, should be called\n// only once at startup and a prior to calling hashmap_new().\nvoid hashmap_set_allocator(void *(*malloc)(size_t), void (*free)(void*)) {\n __malloc = malloc;\n __free = free;\n}\n\nstruct bucket {\n uint64_t hash:48;\n uint64_t dib:16;\n};\n\n// hashmap is an open addressed hash map using robinhood hashing.\nstruct hashmap {\n void *(*malloc)(size_t);\n void *(*realloc)(void *, size_t);\n void (*free)(void *);\n size_t elsize;\n size_t cap;\n uint64_t seed0;\n uint64_t seed1;\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1);\n int (*compare)(const void *a, const void *b, void *udata);\n void (*elfree)(void *item);\n void *udata;\n size_t bucketsz;\n size_t nbuckets;\n size_t count;\n size_t mask;\n size_t growat;\n size_t shrinkat;\n uint8_t loadfactor;\n uint8_t growpower;\n bool oom;\n void *buckets;\n void *spare;\n void *edata;\n};\n\nvoid hashmap_set_grow_by_power(struct hashmap *map, size_t power) {\n map->growpower = power < 1 ? 1 : power > 16 ? 16 : power;\n}\n\nstatic double clamp_load_factor(double factor, double default_factor) {\n // Check for NaN and clamp between 50% and 90%\n return factor != factor ? default_factor : \n factor < 0.50 ? 0.50 : \n factor > 0.95 ? 
0.95 : \n factor;\n}\n\nvoid hashmap_set_load_factor(struct hashmap *map, double factor) {\n factor = clamp_load_factor(factor, map->loadfactor / 100.0);\n map->loadfactor = factor * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n}\n\nstatic struct bucket *bucket_at0(void *buckets, size_t bucketsz, size_t i) {\n return (struct bucket*)(((char*)buckets)+(bucketsz*i));\n}\n\nstatic struct bucket *bucket_at(struct hashmap *map, size_t index) {\n return bucket_at0(map->buckets, map->bucketsz, index);\n}\n\nstatic void *bucket_item(struct bucket *entry) {\n return ((char*)entry)+sizeof(struct bucket);\n}\n\nstatic uint64_t clip_hash(uint64_t hash) {\n return hash & 0xFFFFFFFFFFFF;\n}\n\nstatic uint64_t get_hash(struct hashmap *map, const void *key) {\n return clip_hash(map->hash(key, map->seed0, map->seed1));\n}\n\n\n// hashmap_new_with_allocator returns a new hash map using a custom allocator.\n// See hashmap_new for more information information\nstruct hashmap *hashmap_new_with_allocator(void *(*_malloc)(size_t), \n void *(*_realloc)(void*, size_t), void (*_free)(void*),\n size_t elsize, size_t cap, uint64_t seed0, uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n _malloc = _malloc ? _malloc : __malloc ? __malloc : malloc;\n _realloc = _realloc ? _realloc : __realloc ? __realloc : realloc;\n _free = _free ? _free : __free ? 
__free : free;\n size_t ncap = 16;\n if (cap < ncap) {\n cap = ncap;\n } else {\n while (ncap < cap) {\n ncap *= 2;\n }\n cap = ncap;\n }\n size_t bucketsz = sizeof(struct bucket) + elsize;\n while (bucketsz & (sizeof(uintptr_t)-1)) {\n bucketsz++;\n }\n // hashmap + spare + edata\n size_t size = sizeof(struct hashmap)+bucketsz*2;\n struct hashmap *map = _malloc(size);\n if (!map) {\n return NULL;\n }\n memset(map, 0, sizeof(struct hashmap));\n map->elsize = elsize;\n map->bucketsz = bucketsz;\n map->seed0 = seed0;\n map->seed1 = seed1;\n map->hash = hash;\n map->compare = compare;\n map->elfree = elfree;\n map->udata = udata;\n map->spare = ((char*)map)+sizeof(struct hashmap);\n map->edata = (char*)map->spare+bucketsz;\n map->cap = cap;\n map->nbuckets = cap;\n map->mask = map->nbuckets-1;\n map->buckets = _malloc(map->bucketsz*map->nbuckets);\n if (!map->buckets) {\n _free(map);\n return NULL;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->growpower = 1;\n map->loadfactor = clamp_load_factor(HASHMAP_LOAD_FACTOR, GROW_AT) * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n map->shrinkat = map->nbuckets * SHRINK_AT;\n map->malloc = _malloc;\n map->realloc = _realloc;\n map->free = _free;\n return map; \n}\n\n// hashmap_new returns a new hash map. \n// Param `elsize` is the size of each element in the tree. Every element that\n// is inserted, deleted, or retrieved will be this size.\n// Param `cap` is the default lower capacity of the hashmap. Setting this to\n// zero will default to 16.\n// Params `seed0` and `seed1` are optional seed values that are passed to the \n// following `hash` function. These can be any value you wish but it's often \n// best to use randomly generated values.\n// Param `hash` is a function that generates a hash value for an item. It's\n// important that you provide a good hash function, otherwise it will perform\n// poorly or be vulnerable to Denial-of-service attacks. 
This implementation\n// comes with two helper functions `hashmap_sip()` and `hashmap_murmur()`.\n// Param `compare` is a function that compares items in the tree. See the \n// qsort stdlib function for an example of how this function works.\n// The hashmap must be freed with hashmap_free(). \n// Param `elfree` is a function that frees a specific item. This should be NULL\n// unless you're storing some kind of reference data in the hash.\nstruct hashmap *hashmap_new(size_t elsize, size_t cap, uint64_t seed0, \n uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n return hashmap_new_with_allocator(NULL, NULL, NULL, elsize, cap, seed0, \n seed1, hash, compare, elfree, udata);\n}\n\nstatic void free_elements(struct hashmap *map) {\n if (map->elfree) {\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib) map->elfree(bucket_item(bucket));\n }\n }\n}\n\n// hashmap_clear quickly clears the map. \n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\n// When the update_cap is provided, the map's capacity will be updated to match\n// the currently number of allocated buckets. 
This is an optimization to ensure\n// that this operation does not perform any allocations.\nvoid hashmap_clear(struct hashmap *map, bool update_cap) {\n map->count = 0;\n free_elements(map);\n if (update_cap) {\n map->cap = map->nbuckets;\n } else if (map->nbuckets != map->cap) {\n void *new_buckets = map->malloc(map->bucketsz*map->cap);\n if (new_buckets) {\n map->free(map->buckets);\n map->buckets = new_buckets;\n }\n map->nbuckets = map->cap;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * (map->loadfactor / 100.0) ;\n map->shrinkat = map->nbuckets * SHRINK_AT;\n}\n\nstatic bool resize0(struct hashmap *map, size_t new_cap) {\n struct hashmap *map2 = hashmap_new_with_allocator(map->malloc, map->realloc, \n map->free, map->elsize, new_cap, map->seed0, map->seed1, map->hash, \n map->compare, map->elfree, map->udata);\n if (!map2) return false;\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *entry = bucket_at(map, i);\n if (!entry->dib) {\n continue;\n }\n entry->dib = 1;\n size_t j = entry->hash & map2->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map2, j);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n break;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map2->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map2->spare, map->bucketsz);\n }\n j = (j + 1) & map2->mask;\n entry->dib += 1;\n }\n }\n map->free(map->buckets);\n map->buckets = map2->buckets;\n map->nbuckets = map2->nbuckets;\n map->mask = map2->mask;\n map->growat = map2->growat;\n map->shrinkat = map2->shrinkat;\n map->free(map2);\n return true;\n}\n\nstatic bool resize(struct hashmap *map, size_t new_cap) {\n return resize0(map, new_cap);\n}\n\n// hashmap_set_with_hash works like hashmap_set but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_set_with_hash(struct hashmap *map, const void *item,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*(1<growpower))) {\n map->oom = true;\n return NULL;\n }\n }\n\n struct bucket *entry = map->edata;\n entry->hash = hash;\n entry->dib = 1;\n void *eitem = bucket_item(entry);\n memcpy(eitem, item, map->elsize);\n\n void *bitem;\n size_t i = entry->hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n map->count++;\n return NULL;\n }\n bitem = bucket_item(bucket);\n if (entry->hash == bucket->hash && (!map->compare ||\n map->compare(eitem, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n memcpy(bitem, eitem, map->elsize);\n return map->spare;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map->spare, map->bucketsz);\n eitem = bucket_item(entry);\n }\n i = (i + 1) & map->mask;\n entry->dib += 1;\n }\n}\n\n// hashmap_set inserts or replaces an item in the hash map. If an item is\n// replaced then it is returned otherwise NULL is returned. This operation\n// may allocate memory. If the system is unable to allocate additional\n// memory then NULL is returned and hashmap_oom() returns true.\nconst void *hashmap_set(struct hashmap *map, const void *item) {\n return hashmap_set_with_hash(map, item, get_hash(map, item));\n}\n\n// hashmap_get_with_hash works like hashmap_get but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_get_with_hash(struct hashmap *map, const void *key, \n uint64_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) return NULL;\n if (bucket->hash == hash) {\n void *bitem = bucket_item(bucket);\n if (!map->compare || map->compare(key, bitem, map->udata) == 0) {\n return bitem;\n }\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_get returns the item based on the provided key. If the item is not\n// found then NULL is returned.\nconst void *hashmap_get(struct hashmap *map, const void *key) {\n return hashmap_get_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_probe returns the item in the bucket at position or NULL if an item\n// is not set for that bucket. The position is 'moduloed' by the number of \n// buckets in the hashmap.\nconst void *hashmap_probe(struct hashmap *map, uint64_t position) {\n size_t i = position & map->mask;\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n return bucket_item(bucket);\n}\n\n// hashmap_delete_with_hash works like hashmap_delete but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_delete_with_hash(struct hashmap *map, const void *key,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n void *bitem = bucket_item(bucket);\n if (bucket->hash == hash && (!map->compare ||\n map->compare(key, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n bucket->dib = 0;\n while(1) {\n struct bucket *prev = bucket;\n i = (i + 1) & map->mask;\n bucket = bucket_at(map, i);\n if (bucket->dib <= 1) {\n prev->dib = 0;\n break;\n }\n memcpy(prev, bucket, map->bucketsz);\n prev->dib--;\n }\n map->count--;\n if (map->nbuckets > map->cap && map->count <= map->shrinkat) {\n // Ignore the return value. It's ok for the resize operation to\n // fail to allocate enough memory because a shrink operation\n // does not change the integrity of the data.\n resize(map, map->nbuckets/2);\n }\n return map->spare;\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_delete removes an item from the hash map and returns it. 
If the\n// item is not found then NULL is returned.\nconst void *hashmap_delete(struct hashmap *map, const void *key) {\n return hashmap_delete_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_count returns the number of items in the hash map.\nsize_t hashmap_count(struct hashmap *map) {\n return map->count;\n}\n\n// hashmap_free frees the hash map\n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\nvoid hashmap_free(struct hashmap *map) {\n if (!map) return;\n free_elements(map);\n map->free(map->buckets);\n map->free(map);\n}\n\n// hashmap_oom returns true if the last hashmap_set() call failed due to the \n// system being out of memory.\nbool hashmap_oom(struct hashmap *map) {\n return map->oom;\n}\n\n// hashmap_scan iterates over all items in the hash map\n// Param `iter` can return false to stop iteration early.\n// Returns false if the iteration has been stopped early.\nbool hashmap_scan(struct hashmap *map, \n bool (*iter)(const void *item, void *udata), void *udata)\n{\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib && !iter(bucket_item(bucket), udata)) {\n return false;\n }\n }\n return true;\n}\n\n// hashmap_iter iterates one key at a time yielding a reference to an\n// entry at each iteration. Useful to write simple loops and avoid writing\n// dedicated callbacks and udata structures, as in hashmap_scan.\n//\n// map is a hash map handle. i is a pointer to a size_t cursor that\n// should be initialized to 0 at the beginning of the loop. item is a void\n// pointer pointer that is populated with the retrieved item. 
Note that this\n// is NOT a copy of the item stored in the hash map and can be directly\n// modified.\n//\n// Note that if hashmap_delete() is called on the hashmap being iterated,\n// the buckets are rearranged and the iterator must be reset to 0, otherwise\n// unexpected results may be returned after deletion.\n//\n// This function has not been tested for thread safety.\n//\n// The function returns true if an item was retrieved; false if the end of the\n// iteration has been reached.\nbool hashmap_iter(struct hashmap *map, size_t *i, void **item) {\n struct bucket *bucket;\n do {\n if (*i >= map->nbuckets) return false;\n bucket = bucket_at(map, *i);\n (*i)++;\n } while (!bucket->dib);\n *item = bucket_item(bucket);\n return true;\n}\n\n\n//-----------------------------------------------------------------------------\n// SipHash reference C implementation\n//\n// Copyright (c) 2012-2016 Jean-Philippe Aumasson\n// \n// Copyright (c) 2012-2014 Daniel J. Bernstein \n//\n// To the extent possible under law, the author(s) have dedicated all copyright\n// and related and neighboring rights to this software to the public domain\n// worldwide. This software is distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along\n// with this software. 
If not, see\n// .\n//\n// default: SipHash-2-4\n//-----------------------------------------------------------------------------\nstatic uint64_t SIP64(const uint8_t *in, const size_t inlen, uint64_t seed0,\n uint64_t seed1) \n{\n#define U8TO64_LE(p) \\\n { (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \\\n ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \\\n ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \\\n ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) }\n#define U64TO8_LE(p, v) \\\n { U32TO8_LE((p), (uint32_t)((v))); \\\n U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); }\n#define U32TO8_LE(p, v) \\\n { (p)[0] = (uint8_t)((v)); \\\n (p)[1] = (uint8_t)((v) >> 8); \\\n (p)[2] = (uint8_t)((v) >> 16); \\\n (p)[3] = (uint8_t)((v) >> 24); }\n#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))\n#define SIPROUND \\\n { v0 += v1; v1 = ROTL(v1, 13); \\\n v1 ^= v0; v0 = ROTL(v0, 32); \\\n v2 += v3; v3 = ROTL(v3, 16); \\\n v3 ^= v2; \\\n v0 += v3; v3 = ROTL(v3, 21); \\\n v3 ^= v0; \\\n v2 += v1; v1 = ROTL(v1, 17); \\\n v1 ^= v2; v2 = ROTL(v2, 32); }\n uint64_t k0 = U8TO64_LE((uint8_t*)&seed0);\n uint64_t k1 = U8TO64_LE((uint8_t*)&seed1);\n uint64_t v3 = UINT64_C(0x7465646279746573) ^ k1;\n uint64_t v2 = UINT64_C(0x6c7967656e657261) ^ k0;\n uint64_t v1 = UINT64_C(0x646f72616e646f6d) ^ k1;\n uint64_t v0 = UINT64_C(0x736f6d6570736575) ^ k0;\n const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));\n for (; in != end; in += 8) {\n uint64_t m = U8TO64_LE(in);\n v3 ^= m;\n SIPROUND; SIPROUND;\n v0 ^= m;\n }\n const int left = inlen & 7;\n uint64_t b = ((uint64_t)inlen) << 56;\n switch (left) {\n case 7: b |= ((uint64_t)in[6]) << 48; /* fall through */\n case 6: b |= ((uint64_t)in[5]) << 40; /* fall through */\n case 5: b |= ((uint64_t)in[4]) << 32; /* fall through */\n case 4: b |= ((uint64_t)in[3]) << 24; /* fall through */\n case 3: b |= ((uint64_t)in[2]) << 16; /* fall through */\n case 2: b |= ((uint64_t)in[1]) << 8; /* fall 
through */\n case 1: b |= ((uint64_t)in[0]); break;\n case 0: break;\n }\n v3 ^= b;\n SIPROUND; SIPROUND;\n v0 ^= b;\n v2 ^= 0xff;\n SIPROUND; SIPROUND; SIPROUND; SIPROUND;\n b = v0 ^ v1 ^ v2 ^ v3;\n uint64_t out = 0;\n U64TO8_LE((uint8_t*)&out, b);\n return out;\n}\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n//\n// Murmur3_86_128\n//-----------------------------------------------------------------------------\nstatic uint64_t MM86128(const void *key, const int len, uint32_t seed) {\n#define\tROTL32(x, r) ((x << r) | (x >> (32 - r)))\n#define FMIX32(h) h^=h>>16; h*=0x85ebca6b; h^=h>>13; h*=0xc2b2ae35; h^=h>>16;\n const uint8_t * data = (const uint8_t*)key;\n const int nblocks = len / 16;\n uint32_t h1 = seed;\n uint32_t h2 = seed;\n uint32_t h3 = seed;\n uint32_t h4 = seed;\n uint32_t c1 = 0x239b961b; \n uint32_t c2 = 0xab0e9789;\n uint32_t c3 = 0x38b34ae5; \n uint32_t c4 = 0xa1e38b93;\n const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n for (int i = -nblocks; i; i++) {\n uint32_t k1 = blocks[i*4+0];\n uint32_t k2 = blocks[i*4+1];\n uint32_t k3 = blocks[i*4+2];\n uint32_t k4 = blocks[i*4+3];\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n }\n const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n uint32_t k1 = 0;\n uint32_t k2 = 0;\n uint32_t k3 = 0;\n uint32_t k4 = 0;\n switch(len & 15) {\n case 15: k4 ^= tail[14] << 16; /* fall through */\n case 14: k4 ^= tail[13] << 8; /* fall through 
*/\n case 13: k4 ^= tail[12] << 0;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n /* fall through */\n case 12: k3 ^= tail[11] << 24; /* fall through */\n case 11: k3 ^= tail[10] << 16; /* fall through */\n case 10: k3 ^= tail[ 9] << 8; /* fall through */\n case 9: k3 ^= tail[ 8] << 0;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n /* fall through */\n case 8: k2 ^= tail[ 7] << 24; /* fall through */\n case 7: k2 ^= tail[ 6] << 16; /* fall through */\n case 6: k2 ^= tail[ 5] << 8; /* fall through */\n case 5: k2 ^= tail[ 4] << 0;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n /* fall through */\n case 4: k1 ^= tail[ 3] << 24; /* fall through */\n case 3: k1 ^= tail[ 2] << 16; /* fall through */\n case 2: k1 ^= tail[ 1] << 8; /* fall through */\n case 1: k1 ^= tail[ 0] << 0;\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n /* fall through */\n };\n h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n FMIX32(h1); FMIX32(h2); FMIX32(h3); FMIX32(h4);\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n return (((uint64_t)h2)<<32)|h1;\n}\n\n//-----------------------------------------------------------------------------\n// xxHash Library\n// Copyright (c) 2012-2021 Yann Collet\n// All rights reserved.\n// \n// BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)\n//\n// xxHash3\n//-----------------------------------------------------------------------------\n#define XXH_PRIME_1 11400714785074694791ULL\n#define XXH_PRIME_2 14029467366897019727ULL\n#define XXH_PRIME_3 1609587929392839161ULL\n#define XXH_PRIME_4 9650029242287828579ULL\n#define XXH_PRIME_5 2870177450012600261ULL\n\nstatic uint64_t XXH_read64(const void* memptr) {\n uint64_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint32_t XXH_read32(const void* memptr) {\n uint32_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint64_t XXH_rotl64(uint64_t 
x, int r) {\n return (x << r) | (x >> (64 - r));\n}\n\nstatic uint64_t xxh3(const void* data, size_t len, uint64_t seed) {\n const uint8_t* p = (const uint8_t*)data;\n const uint8_t* const end = p + len;\n uint64_t h64;\n\n if (len >= 32) {\n const uint8_t* const limit = end - 32;\n uint64_t v1 = seed + XXH_PRIME_1 + XXH_PRIME_2;\n uint64_t v2 = seed + XXH_PRIME_2;\n uint64_t v3 = seed + 0;\n uint64_t v4 = seed - XXH_PRIME_1;\n\n do {\n v1 += XXH_read64(p) * XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n\n v2 += XXH_read64(p + 8) * XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n\n v3 += XXH_read64(p + 16) * XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n\n v4 += XXH_read64(p + 24) * XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n\n p += 32;\n } while (p <= limit);\n\n h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + \n XXH_rotl64(v4, 18);\n\n v1 *= XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n h64 ^= v1;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v2 *= XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n h64 ^= v2;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v3 *= XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n h64 ^= v3;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v4 *= XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n h64 ^= v4;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n }\n else {\n h64 = seed + XXH_PRIME_5;\n }\n\n h64 += (uint64_t)len;\n\n while (p + 8 <= end) {\n uint64_t k1 = XXH_read64(p);\n k1 *= XXH_PRIME_2;\n k1 = XXH_rotl64(k1, 31);\n k1 *= XXH_PRIME_1;\n h64 ^= k1;\n h64 = XXH_rotl64(h64, 27) * XXH_PRIME_1 + XXH_PRIME_4;\n p += 8;\n }\n\n if (p + 4 <= end) {\n h64 ^= (uint64_t)(XXH_read32(p)) * XXH_PRIME_1;\n h64 = XXH_rotl64(h64, 23) * XXH_PRIME_2 + XXH_PRIME_3;\n p += 4;\n }\n\n while (p < end) {\n h64 ^= (*p) * XXH_PRIME_5;\n h64 = XXH_rotl64(h64, 11) * XXH_PRIME_1;\n p++;\n }\n\n h64 ^= h64 >> 33;\n h64 *= 
XXH_PRIME_2;\n h64 ^= h64 >> 29;\n h64 *= XXH_PRIME_3;\n h64 ^= h64 >> 32;\n\n return h64;\n}\n\n// hashmap_sip returns a hash value for `data` using SipHash-2-4.\nuint64_t hashmap_sip(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n return SIP64((uint8_t*)data, len, seed0, seed1);\n}\n\n// hashmap_murmur returns a hash value for `data` using Murmur3_86_128.\nuint64_t hashmap_murmur(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return MM86128(data, len, seed0);\n}\n\nuint64_t hashmap_xxhash3(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return xxh3(data, len ,seed0);\n}\n\n//==============================================================================\n// TESTS AND BENCHMARKS\n// $ cc -DHASHMAP_TEST hashmap.c && ./a.out # run tests\n// $ cc -DHASHMAP_TEST -O3 hashmap.c && BENCH=1 ./a.out # run benchmarks\n//==============================================================================\n#ifdef HASHMAP_TEST\n\nstatic size_t deepcount(struct hashmap *map) {\n size_t count = 0;\n for (size_t i = 0; i < map->nbuckets; i++) {\n if (bucket_at(map, i)->dib) {\n count++;\n }\n }\n return count;\n}\n\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wpedantic\"\n#endif\n#ifdef __clang__\n#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n#pragma GCC diagnostic ignored \"-Wcompound-token-split-by-macro\"\n#pragma GCC diagnostic ignored \"-Wgnu-statement-expression-from-macro-expansion\"\n#endif\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\nstatic bool rand_alloc_fail = false;\nstatic int rand_alloc_fail_odds = 3; // 1 in 3 chance malloc will fail.\nstatic uintptr_t total_allocs = 0;\nstatic uintptr_t total_mem = 0;\n\nstatic void *xmalloc(size_t size) {\n if (rand_alloc_fail && rand()%rand_alloc_fail_odds == 0) {\n return NULL;\n }\n void *mem = 
malloc(sizeof(uintptr_t)+size);\n assert(mem);\n *(uintptr_t*)mem = size;\n total_allocs++;\n total_mem += size;\n return (char*)mem+sizeof(uintptr_t);\n}\n\nstatic void xfree(void *ptr) {\n if (ptr) {\n total_mem -= *(uintptr_t*)((char*)ptr-sizeof(uintptr_t));\n free((char*)ptr-sizeof(uintptr_t));\n total_allocs--;\n }\n}\n\nstatic void shuffle(void *array, size_t numels, size_t elsize) {\n char tmp[elsize];\n char *arr = array;\n for (size_t i = 0; i < numels - 1; i++) {\n int j = i + rand() / (RAND_MAX / (numels - i) + 1);\n memcpy(tmp, arr + j * elsize, elsize);\n memcpy(arr + j * elsize, arr + i * elsize, elsize);\n memcpy(arr + i * elsize, tmp, elsize);\n }\n}\n\nstatic bool iter_ints(const void *item, void *udata) {\n int *vals = *(int**)udata;\n vals[*(int*)item] = 1;\n return true;\n}\n\nstatic int compare_ints_udata(const void *a, const void *b, void *udata) {\n return *(int*)a - *(int*)b;\n}\n\nstatic int compare_strs(const void *a, const void *b, void *udata) {\n return strcmp(*(char**)a, *(char**)b);\n}\n\nstatic uint64_t hash_int(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(item, sizeof(int), seed0, seed1);\n // return hashmap_sip(item, sizeof(int), seed0, seed1);\n // return hashmap_murmur(item, sizeof(int), seed0, seed1);\n}\n\nstatic uint64_t hash_str(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_sip(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_murmur(*(char**)item, strlen(*(char**)item), seed0, seed1);\n}\n\nstatic void free_str(void *item) {\n xfree(*(char**)item);\n}\n\nstatic void all(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):2000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n rand_alloc_fail = true;\n\n // test sip and murmur hashes\n assert(hashmap_sip(\"hello\", 
5, 1, 2) == 2957200328589801622);\n assert(hashmap_murmur(\"hello\", 5, 1, 2) == 1682575153221130884);\n assert(hashmap_xxhash3(\"hello\", 5, 1, 2) == 2584346877953614258);\n\n int *vals;\n while (!(vals = xmalloc(N * sizeof(int)))) {}\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n struct hashmap *map;\n\n while (!(map = hashmap_new(sizeof(int), 0, seed, seed, \n hash_int, compare_ints_udata, NULL, NULL))) {}\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n // // printf(\"== %d ==\\n\", vals[i]);\n assert(map->count == (size_t)i);\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n const int *v;\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n \n for (int j = 0; j < i; j++) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n while (true) {\n v = hashmap_set(map, &vals[i]);\n if (!v) {\n assert(hashmap_oom(map));\n continue;\n } else {\n assert(!hashmap_oom(map));\n assert(v && *v == vals[i]);\n break;\n }\n }\n v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n assert(!hashmap_set(map, &vals[i]));\n assert(map->count == (size_t)(i+1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n }\n\n int *vals2;\n while (!(vals2 = xmalloc(N * sizeof(int)))) {}\n memset(vals2, 0, N * sizeof(int));\n assert(hashmap_scan(map, iter_ints, &vals2));\n\n // Test hashmap_iter. 
This does the same as hashmap_scan above.\n size_t iter = 0;\n void *iter_val;\n while (hashmap_iter (map, &iter, &iter_val)) {\n assert (iter_ints(iter_val, &vals2));\n }\n for (int i = 0; i < N; i++) {\n assert(vals2[i] == 1);\n }\n xfree(vals2);\n\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n const int *v;\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(map->count == (size_t)(N-i-1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n for (int j = N-1; j > i; j--) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n }\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n assert(map->count != 0);\n size_t prev_cap = map->cap;\n hashmap_clear(map, true);\n assert(prev_cap < map->cap);\n assert(map->count == 0);\n\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n prev_cap = map->cap;\n hashmap_clear(map, false);\n assert(prev_cap == map->cap);\n\n hashmap_free(map);\n\n xfree(vals);\n\n\n while (!(map = hashmap_new(sizeof(char*), 0, seed, seed,\n hash_str, compare_strs, free_str, NULL)));\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_clear(map, false);\n assert(hashmap_count(map) == 0);\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_free(map);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n#define bench(name, N, code) {{ \\\n if (strlen(name) > 0) { \\\n printf(\"%-14s \", name); \\\n } \\\n size_t tmem = total_mem; \\\n size_t tallocs = 
total_allocs; \\\n uint64_t bytes = 0; \\\n clock_t begin = clock(); \\\n for (int i = 0; i < N; i++) { \\\n (code); \\\n } \\\n clock_t end = clock(); \\\n double elapsed_secs = (double)(end - begin) / CLOCKS_PER_SEC; \\\n double bytes_sec = (double)bytes/elapsed_secs; \\\n printf(\"%d ops in %.3f secs, %.0f ns/op, %.0f op/sec\", \\\n N, elapsed_secs, \\\n elapsed_secs/(double)N*1e9, \\\n (double)N/elapsed_secs \\\n ); \\\n if (bytes > 0) { \\\n printf(\", %.1f GB/sec\", bytes_sec/1024/1024/1024); \\\n } \\\n if (total_mem > tmem) { \\\n size_t used_mem = total_mem-tmem; \\\n printf(\", %.2f bytes/op\", (double)used_mem/N); \\\n } \\\n if (total_allocs > tallocs) { \\\n size_t used_allocs = total_allocs-tallocs; \\\n printf(\", %.2f allocs/op\", (double)used_allocs/N); \\\n } \\\n printf(\"\\n\"); \\\n}}\n\nstatic void benchmarks(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):5000000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n\n int *vals = xmalloc(N * sizeof(int));\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n shuffle(vals, N, sizeof(int));\n\n struct hashmap *map;\n shuffle(vals, N, sizeof(int));\n\n map = hashmap_new(sizeof(int), 0, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get\", N, {\n const int *v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete\", N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n hashmap_free(map);\n\n map = hashmap_new(sizeof(int), N, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set (cap)\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get (cap)\", N, {\n const int *v = 
hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete (cap)\" , N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n\n hashmap_free(map);\n\n \n xfree(vals);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\nint main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}\n\n\n#endif\n\n\n"], ["/pogocache/src/main.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit main.c is the main entry point for the Pogocache program.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"conn.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"save.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"pogocache.h\"\n#include \"gitinfo.h\"\n#include \"uring.h\"\n\n// default user flags\nint nthreads = 0; // number of client threads\nchar *port = \"9401\"; // default tcp port (non-tls)\nchar *host = \"127.0.0.1\"; // default hostname or ip address\nchar *persist = \"\"; // file to load and save data to\nchar *unixsock = \"\"; // use a unix socket\nchar *reuseport = \"no\"; // reuse tcp port for other programs\nchar *tcpnodelay = \"yes\"; // disable nagle's algorithm\nchar *quickack = \"no\"; // enable quick acks\nchar *usecas = \"no\"; // 
enable compare and store\nchar *keepalive = \"yes\"; // socket keepalive setting\nint backlog = 1024; // network socket accept backlog\nint queuesize = 128; // event queue size\nchar *maxmemory = \"80%\"; // Maximum memory allowed - 80% total system\nchar *evict = \"yes\"; // evict keys when maxmemory reached\nint loadfactor = 75; // hashmap load factor\nchar *keysixpack = \"yes\"; // use sixpack compression on keys\nchar *trackallocs = \"no\"; // track allocations (for debugging)\nchar *auth = \"\"; // auth token or pa\nchar *tlsport = \"\"; // enable tls over tcp port\nchar *tlscertfile = \"\"; // tls cert file\nchar *tlskeyfile = \"\"; // tls key file\nchar *tlscacertfile = \"\"; // tls ca cert file\nchar *uring = \"yes\"; // use uring (linux only)\nint maxconns = 1024; // maximum number of sockets\nchar *noticker = \"no\";\nchar *warmup = \"yes\";\n\n// Global variables calculated in main().\n// These should never change during the lifetime of the process.\n// Other source files must use the \"extern const\" specifier.\nchar *version;\nchar *githash;\nuint64_t seed;\nsize_t sysmem;\nsize_t memlimit;\nint verb; // verbosity, 0=no, 1=verbose, 2=very, 3=extremely\nbool usesixpack;\nint useallocator;\nbool usetrackallocs;\nbool useevict;\nint nshards;\nbool usetls; // use tls security (pemfile required);\nbool useauth; // use auth password\nbool usecolor; // allow color in terminal\nchar *useid; // instance id (unique to every process run)\nint64_t procstart; // proc start boot time, for uptime stat\n\n// Global atomic variable. 
These are safe to read and modify by other source\n// files, as long as those sources use \"atomic_\" methods.\natomic_int shutdownreq; // shutdown request counter\natomic_int_fast64_t flush_delay; // delay in seconds to next async flushall\natomic_bool sweep; // mark for async sweep, asap\natomic_bool registered; // registration is active\natomic_bool lowmem; // system is in low memory mode.\n\nstruct pogocache *cache;\n\n// min max robinhood load factor (75% performs pretty well)\n#define MINLOADFACTOR_RH 55\n#define MAXLOADFACTOR_RH 95\n\nstatic void ready(void *udata) {\n (void)udata;\n printf(\"* Ready to accept connections\\n\");\n}\n\n#define noopt \"%s\"\n\n#define HELP(format, ...) \\\n fprintf(file, format, ##__VA_ARGS__)\n\n#define HOPT(opt, desc, format, ...) \\\n fprintf(file, \" \"); \\\n fprintf(file, \"%-22s \", opt); \\\n fprintf(file, \"%-30s \", desc); \\\n if (strcmp(format, noopt) != 0) { \\\n fprintf(file, \"(default: \" format \")\", ##__VA_ARGS__); \\\n } \\\n fprintf(file, \"\\n\");\n\nstatic int calc_nshards(int nprocs) {\n switch (nprocs) {\n case 1: return 64;\n case 2: return 128;\n case 3: return 256;\n case 4: return 512;\n case 5: return 1024;\n case 6: return 2048;\n default: return 4096;\n }\n}\n\nstatic void showhelp(FILE *file) {\n int nprocs = sys_nprocs();\n int nshards = calc_nshards(nprocs);\n\n HELP(\"Usage: %s [options]\\n\", \"pogocache\");\n HELP(\"\\n\");\n\n HELP(\"Basic options:\\n\");\n HOPT(\"-h hostname\", \"listening host\", \"%s\", host);\n HOPT(\"-p port\", \"listening port\", \"%s\", port);\n HOPT(\"-s socket\", \"unix socket file\", \"%s\", *unixsock?unixsock:\"none\");\n\n HOPT(\"-v,-vv,-vvv\", \"verbose logging level\", noopt, \"\");\n HELP(\"\\n\");\n \n HELP(\"Additional options:\\n\");\n HOPT(\"--threads count\", \"number of threads\", \"%d\", nprocs);\n HOPT(\"--maxmemory value\", \"set max memory usage\", \"%s\", maxmemory);\n HOPT(\"--evict yes/no\", \"evict keys at maxmemory\", \"%s\", evict);\n 
HOPT(\"--persist path\", \"persistence file\", \"%s\", *persist?persist:\"none\");\n HOPT(\"--maxconns conns\", \"maximum connections\", \"%d\", maxconns);\n HELP(\"\\n\");\n \n HELP(\"Security options:\\n\");\n HOPT(\"--auth passwd\", \"auth token or password\", \"%s\", *auth?auth:\"none\");\n#ifndef NOOPENSSL\n HOPT(\"--tlsport port\", \"enable tls on port\", \"%s\", \"none\");\n HOPT(\"--tlscert certfile\", \"tls cert file\", \"%s\", \"none\");\n HOPT(\"--tlskey keyfile\", \"tls key file\", \"%s\", \"none\");\n HOPT(\"--tlscacert cacertfile\", \"tls ca-cert file\", \"%s\", \"none\");\n#endif\n HELP(\"\\n\");\n\n HELP(\"Advanced options:\\n\");\n HOPT(\"--shards count\", \"number of shards\", \"%d\", nshards);\n HOPT(\"--backlog count\", \"accept backlog\", \"%d\", backlog);\n HOPT(\"--queuesize count\", \"event queuesize size\", \"%d\", queuesize);\n HOPT(\"--reuseport yes/no\", \"reuseport for tcp\", \"%s\", reuseport);\n HOPT(\"--tcpnodelay yes/no\", \"disable nagle's algo\", \"%s\", tcpnodelay);\n HOPT(\"--quickack yes/no\", \"use quickack (linux)\", \"%s\", quickack);\n HOPT(\"--uring yes/no\", \"use uring (linux)\", \"%s\", uring);\n HOPT(\"--loadfactor percent\", \"hashmap load factor\", \"%d\", loadfactor);\n HOPT(\"--keysixpack yes/no\", \"sixpack compress keys\", \"%s\", keysixpack);\n HOPT(\"--cas yes/no\", \"use compare and store\", \"%s\", usecas);\n HELP(\"\\n\");\n}\n\nstatic void showversion(FILE *file) {\n#ifdef CCSANI\n fprintf(file, \"pogocache %s (CCSANI)\\n\", version);\n#else\n fprintf(file, \"pogocache %s\\n\", version);\n#endif\n}\n\nstatic size_t calc_memlimit(char *maxmemory) {\n if (strcmp(maxmemory, \"unlimited\") == 0) {\n return SIZE_MAX;\n }\n char *oval = maxmemory;\n while (isspace(*maxmemory)) {\n maxmemory++;\n }\n char *end;\n errno = 0;\n double mem = strtod(maxmemory, &end);\n if (errno || !(mem > 0) || !isfinite(mem)) {\n goto fail;\n }\n while (isspace(*end)) {\n end++;\n }\n #define exteq(c) \\\n (tolower(end[0])==c&& 
(!end[1]||(tolower(end[1])=='b'&&!end[2])))\n\n if (strcmp(end, \"\") == 0) {\n return mem;\n } else if (strcmp(end, \"%\") == 0) {\n return (((double)mem)/100.0) * sysmem;\n } else if (exteq('k')) {\n return mem*1024.0;\n } else if (exteq('m')) {\n return mem*1024.0*1024.0;\n } else if (exteq('g')) {\n return mem*1024.0*1024.0*1024.0;\n } else if (exteq('t')) {\n return mem*1024.0*1024.0*1024.0*1024.0;\n }\nfail:\n fprintf(stderr, \"# Invalid maxmemory '%s'\\n\", oval);\n showhelp(stderr);\n exit(1);\n}\n\nstatic size_t setmaxrlimit(void) {\n size_t maxconns = 0;\n struct rlimit rl;\n if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {\n maxconns = rl.rlim_max;\n rl.rlim_cur = rl.rlim_max;\n rl.rlim_max = rl.rlim_max;\n if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {\n perror(\"# setrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n } else {\n perror(\"# getrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n return maxconns;\n}\n\nstatic void evicted(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)value, (void)valuelen, (void)expires, (void)udata;\n return;\n printf(\". 
evicted shard=%d, reason=%d, time=%\" PRIi64 \", key='%.*s'\"\n \", flags=%\" PRIu32 \", cas=%\" PRIu64 \"\\n\",\n shard, reason, time, (int)keylen, (char*)key, flags, cas);\n}\n\n#define BEGIN_FLAGS() \\\n if (0) {\n#define BFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option %s missing value\\n\", opt); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n }\n#define TFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n if (!dryrun) { \\\n op; \\\n }\n#define AFLAG(name, op) \\\n } else if (strcmp(argv[i], \"--\" name) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option --%s missing value\\n\", name); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n } \\\n } else if (strstr(argv[i], \"--\" name \"=\") == argv[i]) { \\\n if (!dryrun) { \\\n char *flag = argv[i]+strlen(name)+3; op; \\\n }\n#define END_FLAGS() \\\n } else { \\\n fprintf(stderr, \"# Unknown program option %s\\n\", argv[i]); \\\n exit(1); \\\n }\n\n#define INVALID_FLAG(name, value) \\\n fprintf(stderr, \"# Option --%s is invalid\\n\", name); \\\n exit(1);\n\nstatic atomic_bool loaded = false;\n\nvoid sigterm(int sig) {\n if (sig == SIGINT || sig == SIGTERM) {\n if (!atomic_load(&loaded) || !*persist) {\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n if (*persist) {\n printf(\"* Saving data to %s, please wait...\\n\", persist);\n int ret = save(persist, true);\n if (ret != 0) {\n perror(\"# Save failed\");\n _Exit(1);\n }\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n\n int count = atomic_fetch_add(&shutdownreq, 1);\n if (count > 0 && sig == SIGINT) {\n printf(\"# User forced shutdown\\n\");\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n }\n}\n\nstatic void tick(void) {\n if (!atomic_load_explicit(&loaded, __ATOMIC_ACQUIRE)) {\n return;\n }\n // Memory usage check\n if (memlimit < SIZE_MAX) {\n struct sys_meminfo 
meminfo;\n sys_getmeminfo(&meminfo);\n size_t memusage = meminfo.rss;\n if (!lowmem) {\n if (memusage > memlimit) {\n atomic_store(&lowmem, true);\n if (verb > 0) {\n printf(\"# Low memory mode on\\n\");\n }\n }\n } else {\n if (memusage < memlimit) {\n atomic_store(&lowmem, false);\n if (verb > 0) {\n printf(\"# Low memory mode off\\n\");\n }\n }\n }\n }\n\n // Print allocations to terminal.\n if (usetrackallocs) {\n printf(\". keys=%zu, allocs=%zu, conns=%zu\\n\",\n pogocache_count(cache, 0), xallocs(), net_nconns());\n }\n\n}\n\nstatic void *ticker(void *arg) {\n (void)arg;\n while (1) {\n tick();\n sleep(1);\n }\n return 0;\n}\n\nstatic void listening(void *udata) {\n (void)udata;\n printf(\"* Network listener established\\n\");\n if (*persist) {\n if (!cleanwork(persist)) {\n // An error message has already been printed\n _Exit(0);\n }\n if (access(persist, F_OK) == 0) {\n printf(\"* Loading data from %s, please wait...\\n\", persist);\n struct load_stats stats;\n int64_t start = sys_now();\n int ret = load(persist, true, &stats);\n if (ret != 0) {\n perror(\"# Load failed\");\n _Exit(1);\n }\n double elapsed = (sys_now()-start)/1e9;\n printf(\"* Loaded %zu entries (%zu expired) (%.3f MB in %.3f secs) \"\n \"(%.0f entries/sec, %.0f MB/sec) \\n\", \n stats.ninserted, stats.nexpired,\n stats.csize/1024.0/1024.0, elapsed, \n (stats.ninserted+stats.nexpired)/elapsed, \n stats.csize/1024.0/1024.0/elapsed);\n }\n }\n atomic_store(&loaded, true);\n}\n\nstatic void yield(void *udata) {\n (void)udata;\n sched_yield();\n}\n\nint main(int argc, char *argv[]) {\n procstart = sys_now();\n\n // Intercept signals\n signal(SIGPIPE, SIG_IGN);\n signal(SIGINT, sigterm);\n signal(SIGTERM, sigterm);\n\n // Line buffer logging so pipes will stream.\n setvbuf(stdout, 0, _IOLBF, 0);\n setvbuf(stderr, 0, _IOLBF, 0);\n char guseid[17];\n memset(guseid, 0, 17);\n useid = guseid;\n sys_genuseid(useid); \n const char *maxmemorymb = 0;\n seed = sys_seed();\n verb = 0;\n usetls = false;\n 
useauth = false;\n lowmem = false;\n version = GITVERS;\n githash = GITHASH;\n\n \n\n\n if (uring_available()) {\n uring = \"yes\";\n } else {\n uring = \"no\";\n }\n\n atomic_init(&shutdownreq, 0);\n atomic_init(&flush_delay, 0);\n atomic_init(&sweep, false);\n atomic_init(®istered, false);\n\n // Parse program flags\n for (int ii = 0; ii < 2; ii++) {\n bool dryrun = ii == 0;\n for (int i = 1; i < argc; i++) {\n if (strcmp(argv[i], \"--help\") == 0) {\n showhelp(stdout);\n exit(0);\n }\n if (strcmp(argv[i], \"--version\") == 0) {\n showversion(stdout);\n exit(0);\n }\n BEGIN_FLAGS()\n BFLAG(\"-p\", port = flag)\n BFLAG(\"-h\", host = flag)\n BFLAG(\"-s\", unixsock = flag)\n TFLAG(\"-v\", verb = 1)\n TFLAG(\"-vv\", verb = 2)\n TFLAG(\"-vvv\", verb = 3)\n AFLAG(\"port\", port = flag)\n AFLAG(\"threads\", nthreads = atoi(flag))\n AFLAG(\"shards\", nshards = atoi(flag))\n AFLAG(\"backlog\", backlog = atoi(flag))\n AFLAG(\"queuesize\", queuesize = atoi(flag))\n AFLAG(\"maxmemory\", maxmemory = flag)\n AFLAG(\"evict\", evict = flag)\n AFLAG(\"reuseport\", reuseport = flag)\n AFLAG(\"uring\", uring = flag)\n AFLAG(\"tcpnodelay\", tcpnodelay = flag)\n AFLAG(\"keepalive\", keepalive = flag)\n AFLAG(\"quickack\", quickack = flag)\n AFLAG(\"trackallocs\", trackallocs = flag)\n AFLAG(\"cas\", usecas = flag)\n AFLAG(\"maxconns\", maxconns = atoi(flag))\n AFLAG(\"loadfactor\", loadfactor = atoi(flag))\n AFLAG(\"sixpack\", keysixpack = flag)\n AFLAG(\"seed\", seed = strtoull(flag, 0, 10))\n AFLAG(\"auth\", auth = flag)\n AFLAG(\"persist\", persist = flag)\n AFLAG(\"noticker\", noticker = flag)\n AFLAG(\"warmup\", warmup = flag)\n#ifndef NOOPENSSL\n // TLS flags\n AFLAG(\"tlsport\", tlsport = flag)\n AFLAG(\"tlscert\", tlscertfile = flag)\n AFLAG(\"tlscacert\", tlscacertfile = flag)\n AFLAG(\"tlskey\", tlskeyfile = flag)\n#endif\n // Hidden or alternative flags\n BFLAG(\"-t\", nthreads = atoi(flag)) // --threads=\n BFLAG(\"-m\", maxmemorymb = flag) // --maxmemory=M\n 
TFLAG(\"-M\", evict = \"no\") // --evict=no\n END_FLAGS()\n }\n }\n\n usecolor = isatty(fileno(stdout));\n\n if (strcmp(evict, \"yes\") == 0) {\n useevict = true;\n } else if (strcmp(evict, \"no\") == 0) {\n useevict = false;\n } else {\n INVALID_FLAG(\"evict\", evict);\n }\n\n bool usereuseport;\n if (strcmp(reuseport, \"yes\") == 0) {\n usereuseport = true;\n } else if (strcmp(reuseport, \"no\") == 0) {\n usereuseport = false;\n } else {\n INVALID_FLAG(\"reuseport\", reuseport);\n }\n\n if (strcmp(trackallocs, \"yes\") == 0) {\n usetrackallocs = true;\n } else if (strcmp(trackallocs, \"no\") == 0) {\n usetrackallocs = false;\n } else {\n INVALID_FLAG(\"trackallocs\", trackallocs);\n }\n\n bool usetcpnodelay;\n if (strcmp(tcpnodelay, \"yes\") == 0) {\n usetcpnodelay = true;\n } else if (strcmp(tcpnodelay, \"no\") == 0) {\n usetcpnodelay = false;\n } else {\n INVALID_FLAG(\"tcpnodelay\", tcpnodelay);\n }\n\n bool usekeepalive;\n if (strcmp(keepalive, \"yes\") == 0) {\n usekeepalive = true;\n } else if (strcmp(keepalive, \"no\") == 0) {\n usekeepalive = false;\n } else {\n INVALID_FLAG(\"keepalive\", keepalive);\n }\n\n\n bool usecasflag;\n if (strcmp(usecas, \"yes\") == 0) {\n usecasflag = true;\n } else if (strcmp(usecas, \"no\") == 0) {\n usecasflag = false;\n } else {\n INVALID_FLAG(\"usecas\", usecas);\n }\n\n if (maxconns <= 0) {\n maxconns = 1024;\n }\n\n\n#ifndef __linux__\n bool useuring = false;\n#else\n bool useuring;\n if (strcmp(uring, \"yes\") == 0) {\n useuring = true;\n } else if (strcmp(uring, \"no\") == 0) {\n useuring = false;\n } else {\n INVALID_FLAG(\"uring\", uring);\n }\n if (useuring) {\n if (!uring_available()) {\n useuring = false;\n }\n }\n#endif\n\n#ifndef __linux__\n quickack = \"no\";\n#endif\n bool usequickack;\n if (strcmp(quickack, \"yes\") == 0) {\n usequickack = true;\n } else if (strcmp(quickack, \"no\") == 0) {\n usequickack = false;\n } else {\n INVALID_FLAG(\"quickack\", quickack);\n }\n\n if (strcmp(keysixpack, \"yes\") == 0) 
{\n usesixpack = true;\n } else if (strcmp(keysixpack, \"no\") == 0) {\n usesixpack = false;\n } else {\n INVALID_FLAG(\"sixpack\", keysixpack);\n }\n\n // Threads\n if (nthreads <= 0) {\n nthreads = sys_nprocs();\n } else if (nthreads > 4096) {\n nthreads = 4096; \n }\n\n if (nshards == 0) {\n nshards = calc_nshards(nthreads);\n }\n if (nshards <= 0 || nshards > 65536) {\n nshards = 65536;\n }\n\n if (loadfactor < MINLOADFACTOR_RH) {\n loadfactor = MINLOADFACTOR_RH;\n printf(\"# loadfactor minumum set to %d\\n\", MINLOADFACTOR_RH);\n } else if (loadfactor > MAXLOADFACTOR_RH) {\n loadfactor = MAXLOADFACTOR_RH;\n printf(\"# loadfactor maximum set to %d\\n\", MAXLOADFACTOR_RH);\n }\n\n if (queuesize < 1) {\n queuesize = 1;\n printf(\"# queuesize adjusted to 1\\n\");\n } else if (queuesize > 4096) {\n queuesize = 4096;\n printf(\"# queuesize adjusted to 4096\\n\");\n }\n\n if (maxmemorymb) {\n size_t sz = strlen(maxmemorymb)+2;\n char *str = xmalloc(sz);\n snprintf(str, sz, \"%sM\", maxmemorymb);\n maxmemory = str;\n }\n\n if (!*port || strcmp(port, \"0\") == 0) {\n port = \"\";\n }\n\n if (!*tlsport || strcmp(tlsport, \"0\") == 0) {\n usetls = false;\n tlsport = \"\";\n } else {\n usetls = true;\n tls_init();\n }\n\n if (*auth) {\n useauth = true;\n }\n setmaxrlimit();\n sysmem = sys_memory();\n memlimit = calc_memlimit(maxmemory);\n\n if (memlimit == SIZE_MAX) {\n evict = \"no\";\n useevict = false;\n }\n\n struct pogocache_opts opts = {\n .yield = yield,\n .seed = seed,\n .malloc = xmalloc,\n .free = xfree,\n .nshards = nshards,\n .loadfactor = loadfactor,\n .usecas = usecasflag,\n .evicted = evicted,\n .allowshrink = true,\n .usethreadbatch = true,\n };\n // opts.yield = 0;\n\n cache = pogocache_new(&opts);\n if (!cache) {\n perror(\"pogocache_new\");\n abort();\n }\n\n // Print the program details\n printf(\"* Pogocache (pid: %d, arch: %s%s, version: %s, git: %s)\\n\",\n getpid(), sys_arch(), sizeof(uintptr_t)==4?\", mode: 32-bit\":\"\", version,\n githash);\n 
char buf0[64], buf1[64];\n char buf2[64];\n if (memlimit < SIZE_MAX) {\n snprintf(buf2, sizeof(buf2), \"%.0f%%/%s\", (double)memlimit/sysmem*100.0,\n memstr(memlimit, buf1));\n } else {\n strcpy(buf2, \"unlimited\");\n }\n printf(\"* Memory (system: %s, max: %s, evict: %s)\\n\", memstr(sysmem, buf0),\n buf2, evict);\n printf(\"* Features (verbosity: %s, sixpack: %s, cas: %s, persist: %s, \"\n \"uring: %s)\\n\",\n verb==0?\"normal\":verb==1?\"verbose\":verb==2?\"very\":\"extremely\",\n keysixpack, usecas, *persist?persist:\"none\", useuring?\"yes\":\"no\");\n char tcp_addr[256];\n snprintf(tcp_addr, sizeof(tcp_addr), \"%s:%s\", host, port);\n printf(\"* Network (port: %s, unixsocket: %s, backlog: %d, reuseport: %s, \"\n \"maxconns: %d)\\n\", *port?port:\"none\", *unixsock?unixsock:\"none\",\n backlog, reuseport, maxconns);\n printf(\"* Socket (tcpnodelay: %s, keepalive: %s, quickack: %s)\\n\",\n tcpnodelay, keepalive, quickack);\n printf(\"* Threads (threads: %d, queuesize: %d)\\n\", nthreads, queuesize);\n printf(\"* Shards (shards: %d, loadfactor: %d%%)\\n\", nshards, loadfactor);\n printf(\"* Security (auth: %s, tlsport: %s)\\n\", \n strlen(auth)>0?\"enabled\":\"disabled\", *tlsport?tlsport:\"none\");\n if (strcmp(noticker,\"yes\") == 0) {\n printf(\"# NO TICKER\\n\");\n } else {\n pthread_t th;\n int ret = pthread_create(&th, 0, ticker, 0);\n if (ret == -1) {\n perror(\"# pthread_create(ticker)\");\n exit(1);\n }\n }\n#ifdef DATASETOK\n printf(\"# DATASETOK\\n\");\n#endif\n#ifdef CMDGETNIL\n printf(\"# CMDGETNIL\\n\");\n#endif\n#ifdef CMDSETOK\n printf(\"# CMDSETOK\\n\");\n#endif\n#ifdef ENABLELOADREAD\n printf(\"# ENABLELOADREAD\\n\");\n#endif\n struct net_opts nopts = {\n .host = host,\n .port = port,\n .tlsport = tlsport,\n .unixsock = unixsock,\n .reuseport = usereuseport,\n .tcpnodelay = usetcpnodelay,\n .keepalive = usekeepalive,\n .quickack = usequickack,\n .backlog = backlog,\n .queuesize = queuesize,\n .nthreads = nthreads,\n .nowarmup = strcmp(warmup, 
\"no\") == 0,\n .nouring = !useuring,\n .listening = listening,\n .ready = ready,\n .data = evdata,\n .opened = evopened,\n .closed = evclosed,\n .maxconns = maxconns,\n };\n net_main(&nopts);\n return 0;\n}\n"], ["/pogocache/src/sys.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit sys.c provides various system-level functions.\n#if __linux__\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __APPLE__\n#include \n#include \n#endif\n#include \"sys.h\"\n\nint sys_nprocs(void) {\n static atomic_int nprocsa = 0;\n int nprocs = atomic_load_explicit(&nprocsa, __ATOMIC_RELAXED);\n if (nprocs > 0) {\n return nprocs;\n }\n int logical = sysconf(_SC_NPROCESSORS_CONF);\n logical = logical < 1 ? 1 : logical;\n int physical = logical;\n int affinity = physical;\n#ifdef __linux__\n affinity = 0;\n cpu_set_t mask;\n CPU_ZERO(&mask);\n if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {\n perror(\"sched_getaffinity\");\n return 1;\n }\n for (int i = 0; i < CPU_SETSIZE; i++) {\n if (CPU_ISSET(i, &mask)) {\n affinity++;\n }\n }\n double hyper = ceil((double)logical / (double)physical);\n hyper = hyper < 1 ? 1 : hyper;\n affinity /= hyper;\n#endif\n nprocs = affinity;\n nprocs = nprocs < 1 ? 
1 : nprocs;\n atomic_store_explicit(&nprocsa, nprocs, __ATOMIC_RELAXED);\n return nprocs;\n}\n\n#ifndef __linux__\n#include \n#endif\n\nsize_t sys_memory(void) {\n size_t sysmem = 0;\n#ifdef __linux__\n FILE *f = fopen(\"/proc/meminfo\", \"rb\");\n if (f) {\n char buf[4096];\n size_t n = fread(buf, 1, sizeof(buf)-1, f);\n buf[n] = '\\0';\n char *s = 0;\n char *e = 0;\n s = strstr(buf, \"MemTotal\");\n if (s) s = strstr(s, \": \");\n if (s) e = strstr(s, \"\\n\");\n if (e) {\n *e = '\\0';\n s += 2;\n while (isspace(*s)) s++;\n if (strstr(s, \" kB\")) {\n s[strstr(s, \" kB\")-s] = '\\0';\n }\n errno = 0;\n char *end;\n int64_t isysmem = strtoll(s, &end, 10);\n assert(errno == 0 && isysmem > 0);\n isysmem *= 1024;\n sysmem = isysmem;\n }\n fclose(f);\n }\n#else\n size_t memsize = 0;\n size_t len = sizeof(memsize);\n if (sysctlbyname(\"hw.memsize\", &memsize, &len, 0, 0) == 0) {\n sysmem = memsize;\n }\n#endif\n if (sysmem == 0) {\n fprintf(stderr, \"# could not detect total system memory, bailing\\n\");\n exit(1);\n }\n return sysmem;\n}\n\nuint64_t sys_seed(void) {\n #define NSEEDCAP 64\n static __thread int nseeds = 0;\n static __thread uint64_t seeds[NSEEDCAP];\n if (nseeds == 0) {\n // Generate a group of new seeds\n FILE *f = fopen(\"/dev/urandom\", \"rb+\");\n if (!f) {\n perror(\"# /dev/urandom\");\n exit(1);\n }\n size_t n = fread(seeds, 8, NSEEDCAP, f);\n (void)n;\n assert(n == NSEEDCAP);\n fclose(f);\n nseeds = NSEEDCAP;\n }\n return seeds[--nseeds];\n}\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// Return monotonic nanoseconds of the CPU clock.\nint64_t sys_now(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// Return unix timestamp in nanoseconds\nint64_t 
sys_unixnow(void) {\n struct timespec now = { 0 };\n clock_gettime(CLOCK_REALTIME, &now);\n return nanotime(&now);\n}\n\n#ifdef __APPLE__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n task_basic_info_data_t taskInfo;\n mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;\n kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,\n (task_info_t)&taskInfo, &infoCount);\n if (kr != KERN_SUCCESS) {\n fprintf(stderr, \"# task_info: %s\\n\", mach_error_string(kr));\n abort();\n }\n info->virt = taskInfo.virtual_size;\n info->rss = taskInfo.resident_size;\n}\n#elif __linux__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n FILE *f = fopen(\"/proc/self/statm\", \"r\");\n if (!f) {\n perror(\"# open /proc/self/statm\");\n abort();\n }\n unsigned long vm_pages, rss_pages;\n long x = fscanf(f, \"%lu %lu\", &vm_pages, &rss_pages);\n fclose(f);\n if (x != 2) {\n perror(\"# read /proc/self/statm\");\n abort();\n }\n\n // Get the system page size (in bytes)\n size_t page_size = sysconf(_SC_PAGESIZE);\n assert(page_size > 0);\n\n // Convert pages to bytes\n info->virt = vm_pages * page_size;\n info->rss = rss_pages * page_size;\n}\n#endif\n\n#include \n\nconst char *sys_arch(void) {\n static __thread bool got = false;\n static __thread char arch[1024] = \"unknown/error\";\n if (!got) {\n struct utsname unameData;\n if (uname(&unameData) == 0) {\n snprintf(arch, sizeof(arch), \"%s/%s\", unameData.sysname, \n unameData.machine);\n char *p = arch;\n while (*p) {\n *p = tolower(*p);\n p++;\n }\n got = true;\n }\n }\n return arch;\n}\n\nvoid sys_genuseid(char useid[16]) {\n const uint8_t chs[] = \n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789\";\n uint64_t a = sys_seed();\n uint64_t b = sys_seed();\n uint8_t bytes[16];\n memcpy(bytes, &a, 8);\n memcpy(bytes+8, &b, 8);\n for (int i = 0; i < 16; i++) {\n bytes[i] = chs[bytes[i]%62];\n }\n memcpy(useid, bytes, 16);\n}\n\n// Returns a unique thread id for the current thread.\n// This is 
an artificial generated value that is always distinct. \nuint64_t sys_threadid(void) {\n static atomic_int_fast64_t next = 0;\n static __thread uint64_t id = 0;\n if (id == 0) {\n id = atomic_fetch_add_explicit(&next, 1, __ATOMIC_RELEASE);\n }\n return id;\n}\n"], ["/pogocache/src/tls.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit tls.c provides an interface for translating TLS bytes streams.\n// This is intended to be used with client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tls.h\"\n#include \"xmalloc.h\"\n#include \"openssl.h\"\n\n#ifdef NOOPENSSL\n\nvoid tls_init(void) {}\nbool tls_accept(int fd, struct tls **tls_out) {\n (void)fd;\n *tls_out = 0;\n return true;\n}\nint tls_close(struct tls *tls, int fd) {\n (void)tls;\n return close(fd);\n}\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n (void)tls;\n return read(fd, data, len);\n}\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n (void)tls;\n return write(fd, data, len);\n}\n#else\n\nextern const bool usetls;\nextern const char *tlscertfile;\nextern const char *tlscacertfile;\nextern const char *tlskeyfile;\n\nstatic SSL_CTX *ctx;\n\nstruct tls {\n SSL *ssl;\n};\n\nvoid tls_init(void) {\n if (!usetls) {\n return;\n }\n ctx = SSL_CTX_new(TLS_server_method());\n if (!SSL_CTX_load_verify_locations(ctx, tlscacertfile, 0)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(1);\n }\n if (!SSL_CTX_use_certificate_file(ctx, tlscertfile , SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n 
ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_use_PrivateKey_file(ctx, tlskeyfile, SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_check_private_key(ctx)) {\n printf(\"# tls: private key does not match the certificate\\n\");\n exit(EXIT_FAILURE);\n }\n}\n\nbool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n // tls is disabled for all of pogocache.\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}\n\nint tls_close(struct tls *tls, int fd) {\n if (tls) {\n if (SSL_shutdown(tls->ssl) == 0) {\n SSL_shutdown(tls->ssl);\n }\n SSL_free(tls->ssl);\n xfree(tls);\n }\n return close(fd);\n}\n\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n if (!tls) {\n return write(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_write_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else {\n // Unreliable errno. 
Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n if (!tls) {\n return read(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_read_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else { \n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\n#endif\n"], ["/pogocache/src/buf.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit buf.c is a simple interface for creating byte buffers\n#include \n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"buf.h\"\n\nvoid buf_ensure(struct buf *buf, size_t len) {\n if (buf->len+len > buf->cap) {\n size_t oldcap = buf->cap;\n size_t newcap = buf->cap;\n if (oldcap == 0) {\n buf->data = 0;\n newcap = 16;\n } else {\n newcap *= 2;\n }\n while (buf->len+len > newcap) {\n newcap *= 2;\n }\n buf->data = xrealloc(buf->data, newcap);\n buf->cap = newcap;\n }\n}\n\nvoid buf_append(struct buf *buf, const void *data, size_t len){\n buf_ensure(buf, len);\n memcpy(buf->data+buf->len, data, len);\n buf->len += len;\n}\n\nvoid buf_append_byte(struct buf *buf, char byte) {\n if (buf->len < buf->cap) {\n buf->data[buf->len++] = byte;\n } else {\n buf_append(buf, &byte, 1);\n }\n}\n\nvoid buf_clear(struct buf *buf) {\n // No capacity means this buffer is owned somewhere else and we \n // must not free the data.\n if (buf->cap) {\n xfree(buf->data);\n }\n memset(buf, 0, 
sizeof(struct buf));\n}\n\nvoid buf_append_uvarint(struct buf *buf, uint64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_u64(buf->data+buf->len, x);\n buf->len += n;\n}\n\nvoid buf_append_varint(struct buf *buf, int64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_i64(buf->data+buf->len, x);\n buf->len += n;\n}\n"], ["/pogocache/src/xmalloc.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit xmalloc.c is the primary allocator interface. The xmalloc/xfree\n// functions should be used instead of malloc/free.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#if defined(__linux__) && defined(__GLIBC__)\n#include \n#define HAS_MALLOC_H\n#endif\n\n// from main.c\nextern const int useallocator;\nextern const bool usetrackallocs;\n\n#ifdef NOTRACKALLOCS\n#define add_alloc()\n#define sub_alloc()\nsize_t xallocs(void) {\n return 0;\n}\n#else\nstatic atomic_int_fast64_t nallocs = 0;\n\nsize_t xallocs(void) {\n if (usetrackallocs) {\n return atomic_load(&nallocs);\n } else {\n return 0;\n }\n}\n\nstatic void add_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_add_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n\nstatic void sub_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_sub_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n#endif\n\nstatic void check_ptr(void *ptr) {\n if (!ptr) {\n fprintf(stderr, \"# %s\\n\", strerror(ENOMEM));\n abort();\n }\n}\n\nvoid *xmalloc(size_t size) {\n void *ptr = malloc(size);\n check_ptr(ptr);\n add_alloc();\n return ptr;\n}\n\nvoid *xrealloc(void *ptr, size_t size) {\n if (!ptr) {\n return 
xmalloc(size);\n }\n ptr = realloc(ptr, size);\n check_ptr(ptr);\n return ptr;\n}\n\nvoid xfree(void *ptr) {\n if (!ptr) {\n return;\n }\n free(ptr);\n sub_alloc();\n}\n\nvoid xpurge(void) {\n#ifdef HAS_MALLOC_H\n // Releases unused heap memory to OS\n malloc_trim(0);\n#endif\n}\n"], ["/pogocache/src/stats.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit stats.c tracks various stats. Mostly for the memcache protocol.\n#include \n#include \"stats.h\"\n\nstatic atomic_uint_fast64_t g_stat_cmd_flush = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_touch = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_meta = 0;\nstatic atomic_uint_fast64_t g_stat_get_expired = 0;\nstatic atomic_uint_fast64_t g_stat_get_flushed = 0;\nstatic atomic_uint_fast64_t g_stat_delete_misses = 0;\nstatic atomic_uint_fast64_t g_stat_delete_hits = 0;\nstatic atomic_uint_fast64_t g_stat_incr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_incr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_decr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_decr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_misses = 0;\nstatic atomic_uint_fast64_t g_stat_cas_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_badval = 0;\nstatic atomic_uint_fast64_t g_stat_touch_hits = 0;\nstatic atomic_uint_fast64_t g_stat_touch_misses = 0;\nstatic atomic_uint_fast64_t g_stat_store_too_large = 0;\nstatic atomic_uint_fast64_t g_stat_store_no_memory = 0;\nstatic atomic_uint_fast64_t g_stat_auth_cmds = 0;\nstatic atomic_uint_fast64_t g_stat_auth_errors = 0;\n\nvoid stat_cmd_flush_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_flush, 1, __ATOMIC_RELAXED);\n}\n\nvoid 
stat_cmd_touch_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_touch, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_meta_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_meta, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_expired_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_expired, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_flushed_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_flushed, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_badval_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_badval, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_misses_incr(struct conn *conn) {\n (void)conn;\n 
atomic_fetch_add_explicit(&g_stat_touch_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_too_large_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_too_large, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_no_memory_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_no_memory, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_cmds_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_cmds, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_errors_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_errors, 1, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_flush(void) {\n return atomic_load_explicit(&g_stat_cmd_flush, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_touch(void) {\n return atomic_load_explicit(&g_stat_cmd_touch, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_meta(void) {\n return atomic_load_explicit(&g_stat_cmd_meta, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_expired(void) {\n return atomic_load_explicit(&g_stat_get_expired, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_flushed(void) {\n return atomic_load_explicit(&g_stat_get_flushed, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_misses(void) {\n return atomic_load_explicit(&g_stat_delete_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_hits(void) {\n return atomic_load_explicit(&g_stat_delete_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_misses(void) {\n return atomic_load_explicit(&g_stat_incr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_hits(void) {\n return atomic_load_explicit(&g_stat_incr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_misses(void) {\n return atomic_load_explicit(&g_stat_decr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_hits(void) {\n return atomic_load_explicit(&g_stat_decr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_misses(void) {\n return atomic_load_explicit(&g_stat_cas_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_hits(void) {\n return 
atomic_load_explicit(&g_stat_cas_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_badval(void) {\n return atomic_load_explicit(&g_stat_cas_badval, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_hits(void) {\n return atomic_load_explicit(&g_stat_touch_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_misses(void) {\n return atomic_load_explicit(&g_stat_touch_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_too_large(void) {\n return atomic_load_explicit(&g_stat_store_too_large, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_no_memory(void) {\n return atomic_load_explicit(&g_stat_store_no_memory, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_cmds(void) {\n return atomic_load_explicit(&g_stat_auth_cmds, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_errors(void) {\n return atomic_load_explicit(&g_stat_auth_errors, __ATOMIC_RELAXED);\n}\n\n\n"], ["/pogocache/src/uring.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n#include \n#include \"uring.h\"\n\nbool uring_available(void) {\n#ifdef NOURING\n return false;\n#else\n static atomic_int available = -1;\n if (atomic_load_explicit(&available, __ATOMIC_ACQUIRE )== -1) {\n struct io_uring ring;\n if (io_uring_queue_init(1, &ring, 0) == 0) {\n io_uring_queue_exit(&ring);\n atomic_store(&available, 1);\n } else {\n atomic_store(&available, 0);\n }\n }\n return atomic_load_explicit(&available, __ATOMIC_ACQUIRE) == 1;\n#endif\n}\n"]], "task_instance_info": {"created_time": "2025-08-20 18:04:05", "created_task_model": "DeepSeek-R1", "function_skeleton": "static ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args) {}", "code_description": "This code segment functions to: Tokenize input 
commands by splitting on whitespace boundaries and detect the termination of a command line sequence while handling carriage return and newline conventions."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```c\n[TASK_BEGIN]\nstatic ssize_t parse_memcache_telnet(const char *data, size_t len, struct args *args) {\n // Find the end of command line\n const char *end = memchr(data, '\\r', len);\n if (!end || end + 1 >= data + len || *(end + 1) != '\\n') {\n return 0; // Need more data\n }\n \n size_t line_len = end - data + 2;\n const char *line_end = end;\n size_t num_args = 0;\n const char *p = data;\n\n while (p < line_end && num_args < MAXARGS) {\n // Skip whitespace\n while (p < line_end && (*p == ' ' || *p == '\\t')) {\n p++;\n }\n if (p >= line_end) break;\n\n // Find token end\n const char *start = p;\n while (p < line_end && *p != ' ' && *p != '\\t') {\n p++;\n }\n\n // Add token to args\n args->bufs[num_args].data = start;\n args->bufs[num_args].len = p - start;\n num_args++;\n }\n\n args->len = num_args;\n return line_len;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 18-05-00"}, "editdistance_info": {"edit_distance": 23.6515, "calculate_time": "2025-08-20 18:05:00", "true_code_clean": "static ssize_t parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}", "predict_code_clean": "static ssize_t parse_memcache_telnet(const char *data, size_t len, struct args *args) {\n const char *end = memchr(data, '\\r', len);\n if (!end || end + 1 >= data + len || *(end + 1) != '\\n') {\n return 0; \n }\n 
size_t line_len = end - data + 2;\n const char *line_end = end;\n size_t num_args = 0;\n const char *p = data;\n while (p < line_end && num_args < MAXARGS) {\n while (p < line_end && (*p == ' ' || *p == '\\t')) {\n p++;\n }\n if (p >= line_end) break;\n const char *start = p;\n while (p < line_end && *p != ' ' && *p != '\\t') {\n p++;\n }\n args->bufs[num_args].data = start;\n args->bufs[num_args].len = p - start;\n num_args++;\n }\n args->len = num_args;\n return line_len;\n}"}} {"repo_name": "pogocache", "file_name": "/pogocache/src/lz4.c", "inference_info": {"prefix_code": "/*\n LZ4 - Fast LZ compression algorithm\n Copyright (C) 2011-2023, Yann Collet.\n\n BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following disclaimer\n in the documentation and/or other materials provided with the\n distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n You can contact the author at :\n - LZ4 homepage : http://www.lz4.org\n - LZ4 source repository : https://github.com/lz4/lz4\n*/\n\n/*-************************************\n* Tuning parameters\n**************************************/\n/*\n * LZ4_HEAPMODE :\n * Select how stateless compression functions like `LZ4_compress_default()`\n * allocate memory for their hash table,\n * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).\n */\n#ifndef LZ4_HEAPMODE\n# define LZ4_HEAPMODE 0\n#endif\n\n/*\n * LZ4_ACCELERATION_DEFAULT :\n * Select \"acceleration\" for LZ4_compress_fast() when parameter value <= 0\n */\n#define LZ4_ACCELERATION_DEFAULT 1\n/*\n * LZ4_ACCELERATION_MAX :\n * Any \"acceleration\" value higher than this threshold\n * get treated as LZ4_ACCELERATION_MAX instead (fix #876)\n */\n#define LZ4_ACCELERATION_MAX 65537\n\n\n/*-************************************\n* CPU Feature Detection\n**************************************/\n/* LZ4_FORCE_MEMORY_ACCESS\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The below switch allow to select different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. 
It depends on compiler extension (ie, not portable).\n * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method is portable but violate C standard.\n * It can generate buggy code on targets which assembly generation depends on alignment.\n * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */\n# if defined(__GNUC__) && \\\n ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \\\n || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n# define LZ4_FORCE_MEMORY_ACCESS 2\n# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)\n# define LZ4_FORCE_MEMORY_ACCESS 1\n# endif\n#endif\n\n/*\n * LZ4_FORCE_SW_BITCOUNT\n * Define this parameter if your target system or compiler does not support hardware bit count\n */\n#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */\n# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */\n# define LZ4_FORCE_SW_BITCOUNT\n#endif\n\n\n\n/*-************************************\n* Dependency\n**************************************/\n/*\n * LZ4_SRC_INCLUDED:\n * Amalgamation flag, whether lz4.c is included\n */\n#ifndef LZ4_SRC_INCLUDED\n# define LZ4_SRC_INCLUDED 1\n#endif\n\n#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS\n# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */\n#endif\n\n#ifndef LZ4_STATIC_LINKING_ONLY\n# define LZ4_STATIC_LINKING_ONLY\n#endif\n#include \"lz4.h\"\n/* see also \"memory routines\" below */\n\n\n/*-************************************\n* Compiler 
Options\n**************************************/\n#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */\n# include /* only present in VS2005+ */\n# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */\n# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */\n# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */\n# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */\n#endif /* _MSC_VER */\n\n#ifndef LZ4_FORCE_INLINE\n# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */\n# define LZ4_FORCE_INLINE static __forceinline\n# else\n# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */\n# if defined (__GNUC__) || defined (__clang__)\n# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))\n# else\n# define LZ4_FORCE_INLINE static inline\n# endif\n# else\n# define LZ4_FORCE_INLINE static\n# endif /* __STDC_VERSION__ */\n# endif /* _MSC_VER */\n#endif /* LZ4_FORCE_INLINE */\n\n/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE\n * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,\n * together with a simple 8-byte copy loop as a fall-back path.\n * However, this optimization hurts the decompression speed by >30%,\n * because the execution does not go to the optimized loop\n * for typical compressible data, and all of the preamble checks\n * before going to the fall-back path become useless overhead.\n * This optimization happens only with the -O3 flag, and -O2 generates\n * a simple 8-byte copy loop.\n * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8\n * functions are annotated with __attribute__((optimize(\"O2\"))),\n * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute\n * of LZ4_wildCopy8 
does not affect the compression speed.\n */\n#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)\n# define LZ4_FORCE_O2 __attribute__((optimize(\"O2\")))\n# undef LZ4_FORCE_INLINE\n# define LZ4_FORCE_INLINE static __inline __attribute__((optimize(\"O2\"),always_inline))\n#else\n# define LZ4_FORCE_O2\n#endif\n\n#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)\n# define expect(expr,value) (__builtin_expect ((expr),(value)) )\n#else\n# define expect(expr,value) (expr)\n#endif\n\n#ifndef likely\n#define likely(expr) expect((expr) != 0, 1)\n#endif\n#ifndef unlikely\n#define unlikely(expr) expect((expr) != 0, 0)\n#endif\n\n/* Should the alignment test prove unreliable, for some reason,\n * it can be disabled by setting LZ4_ALIGN_TEST to 0 */\n#ifndef LZ4_ALIGN_TEST /* can be externally provided */\n# define LZ4_ALIGN_TEST 1\n#endif\n\n\n/*-************************************\n* Memory routines\n**************************************/\n\n/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :\n * Disable relatively high-level LZ4/HC functions that use dynamic memory\n * allocation functions (malloc(), calloc(), free()).\n *\n * Note that this is a compile-time switch. 
And since it disables\n * public/stable LZ4 v1 API functions, we don't recommend using this\n * symbol to generate a library for distribution.\n *\n * The following public functions are removed when this symbol is defined.\n * - lz4 : LZ4_createStream, LZ4_freeStream,\n * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)\n * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,\n * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)\n * - lz4frame, lz4file : All LZ4F_* functions\n */\n#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\n# define ALLOC(s) lz4_error_memory_allocation_is_disabled\n# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled\n# define FREEMEM(p) lz4_error_memory_allocation_is_disabled\n#elif defined(LZ4_USER_MEMORY_FUNCTIONS)\n/* memory management functions can be customized by user project.\n * Below functions must exist somewhere in the Project\n * and be available at link time */\nvoid* LZ4_malloc(size_t s);\nvoid* LZ4_calloc(size_t n, size_t s);\nvoid LZ4_free(void* p);\n# define ALLOC(s) LZ4_malloc(s)\n# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)\n# define FREEMEM(p) LZ4_free(p)\n#else\n# include /* malloc, calloc, free */\n# define ALLOC(s) malloc(s)\n# define ALLOC_AND_ZERO(s) calloc(1,s)\n# define FREEMEM(p) free(p)\n#endif\n\n#if ! 
LZ4_FREESTANDING\n# include /* memset, memcpy */\n#endif\n#if !defined(LZ4_memset)\n# define LZ4_memset(p,v,s) memset((p),(v),(s))\n#endif\n#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))\n\n\n/*-************************************\n* Common Constants\n**************************************/\n#define MINMATCH 4\n\n#define WILDCOPYLENGTH 8\n#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */\n#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */\n#define FASTLOOP_SAFE_DISTANCE 64\nstatic const int LZ4_minLength = (MFLIMIT+1);\n\n#define KB *(1 <<10)\n#define MB *(1 <<20)\n#define GB *(1U<<30)\n\n#define LZ4_DISTANCE_ABSOLUTE_MAX 65535\n#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */\n# error \"LZ4_DISTANCE_MAX is too big : must be <= 65535\"\n#endif\n\n#define ML_BITS 4\n#define ML_MASK ((1U<=1)\n# include \n#else\n# ifndef assert\n# define assert(condition) ((void)0)\n# endif\n#endif\n\n#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */\n\n#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)\n# include \n static int g_debuglog_enable = 1;\n# define DEBUGLOG(l, ...) { \\\n if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \\\n fprintf(stderr, __FILE__ \" %i: \", __LINE__); \\\n fprintf(stderr, __VA_ARGS__); \\\n fprintf(stderr, \" \\n\"); \\\n } }\n#else\n# define DEBUGLOG(l, ...) 
{} /* disabled */\n#endif\n\nstatic int LZ4_isAligned(const void* ptr, size_t alignment)\n{\n return ((size_t)ptr & (alignment -1)) == 0;\n}\n\n\n/*-************************************\n* Types\n**************************************/\n#include \n#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n# include \n typedef uint8_t BYTE;\n typedef uint16_t U16;\n typedef uint32_t U32;\n typedef int32_t S32;\n typedef uint64_t U64;\n typedef uintptr_t uptrval;\n#else\n# if UINT_MAX != 4294967295UL\n# error \"LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4\"\n# endif\n typedef unsigned char BYTE;\n typedef unsigned short U16;\n typedef unsigned int U32;\n typedef signed int S32;\n typedef unsigned long long U64;\n typedef size_t uptrval; /* generally true, except OpenVMS-64 */\n#endif\n\n#if defined(__x86_64__)\n typedef U64 reg_t; /* 64-bits in x32 mode */\n#else\n typedef size_t reg_t; /* 32-bits in x32 mode */\n#endif\n\ntypedef enum {\n notLimited = 0,\n limitedOutput = 1,\n fillOutput = 2\n} limitedOutput_directive;\n\n\n/*-************************************\n* Reading and writing into memory\n**************************************/\n\n/**\n * LZ4 relies on memcpy with a constant size being inlined. In freestanding\n * environments, the compiler can't assume the implementation of memcpy() is\n * standard compliant, so it can't apply its specialized memcpy() inlining\n * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze\n * memcpy() as if it were standard compliant, so it can inline it in freestanding\n * environments. 
This is needed when decompressing the Linux Kernel, for example.\n */\n#if !defined(LZ4_memcpy)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)\n# else\n# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)\n# endif\n#endif\n\n#if !defined(LZ4_memmove)\n# if defined(__GNUC__) && (__GNUC__ >= 4)\n# define LZ4_memmove __builtin_memmove\n# else\n# define LZ4_memmove memmove\n# endif\n#endif\n\nstatic unsigned LZ4_isLittleEndian(void)\n{\n const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */\n return one.c[0];\n}\n\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))\n#elif defined(_MSC_VER)\n#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))\n#endif\n\n#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)\n/* lie to the compiler about data alignment; use with caution */\n\nstatic U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }\nstatic U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }\nstatic reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }\n\n#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\nLZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;\nLZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;\nLZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;\n\nstatic U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }\nstatic U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; 
}\nstatic reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }\n\nstatic void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }\nstatic void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }\n\n#else /* safe and portable access using memcpy() */\n\nstatic U16 LZ4_read16(const void* memPtr)\n{\n U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic U32 LZ4_read32(const void* memPtr)\n{\n U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic reg_t LZ4_read_ARCH(const void* memPtr)\n{\n reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;\n}\n\nstatic void LZ4_write16(void* memPtr, U16 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\nstatic void LZ4_write32(void* memPtr, U32 value)\n{\n LZ4_memcpy(memPtr, &value, sizeof(value));\n}\n\n#endif /* LZ4_FORCE_MEMORY_ACCESS */\n\n\nstatic U16 LZ4_readLE16(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read16(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U16)((U16)p[0] | (p[1]<<8));\n }\n}\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\nstatic U32 LZ4_readLE32(const void* memPtr)\n{\n if (LZ4_isLittleEndian()) {\n return LZ4_read32(memPtr);\n } else {\n const BYTE* p = (const BYTE*)memPtr;\n return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);\n }\n}\n#endif\n\nstatic void LZ4_writeLE16(void* memPtr, U16 value)\n{\n if (LZ4_isLittleEndian()) {\n LZ4_write16(memPtr, value);\n } else {\n BYTE* p = (BYTE*)memPtr;\n p[0] = (BYTE) value;\n p[1] = (BYTE)(value>>8);\n }\n}\n\n/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */\nLZ4_FORCE_INLINE\nvoid LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/\nLZ4_FORCE_INLINE void\nLZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)\n{\n BYTE* d = (BYTE*)dstPtr;\n const BYTE* s = (const BYTE*)srcPtr;\n BYTE* const e = (BYTE*)dstEnd;\n\n do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH\n * - there is at least 12 bytes available to write after dstEnd */\nLZ4_FORCE_INLINE void\nLZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)\n{\n BYTE v[8];\n\n assert(dstEnd >= dstPtr + MINMATCH);\n\n switch(offset) {\n case 1:\n MEM_INIT(v, *srcPtr, 8);\n break;\n case 2:\n LZ4_memcpy(v, srcPtr, 2);\n LZ4_memcpy(&v[2], srcPtr, 2);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(push)\n# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */\n#endif\n LZ4_memcpy(&v[4], v, 4);\n#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */\n# pragma warning(pop)\n#endif\n break;\n case 4:\n LZ4_memcpy(v, srcPtr, 4);\n LZ4_memcpy(&v[4], srcPtr, 4);\n break;\n default:\n LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);\n return;\n }\n\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n while (dstPtr < dstEnd) {\n LZ4_memcpy(dstPtr, v, 8);\n dstPtr += 8;\n }\n}\n#endif\n\n\n/*-************************************\n* Common functions\n**************************************/\nstatic unsigned LZ4_NbCommonBytes (reg_t val)\n{\n assert(val != 0);\n if (LZ4_isLittleEndian()) {\n if (sizeof(val) == 8) {\n# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)\n/*-*************************************************************************************************\n* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.\n* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics\n* including _tzcnt_u64. 
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.\n****************************************************************************************************/\n# if defined(__clang__) && (__clang_major__ < 10)\n /* Avoid undefined clang-cl intrinsics issue.\n * See https://github.com/lz4/lz4/pull/1017 for details. */\n return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;\n# else\n /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */\n return (unsigned)_tzcnt_u64(val) >> 3;\n# endif\n# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r = 0;\n _BitScanForward64(&r, (U64)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctzll((U64)val) >> 3;\n# else\n const U64 m = 0x0101010101010101ULL;\n val ^= val - 1;\n return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);\n# endif\n } else /* 32 bits */ {\n# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)\n unsigned long r;\n _BitScanForward(&r, (U32)val);\n return (unsigned)r >> 3;\n# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_ctz((U32)val) >> 3;\n# else\n const U32 m = 0x01010101;\n return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;\n# endif\n }\n } else /* Big Endian CPU */ {\n if (sizeof(val)==8) {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clzll((U64)val) >> 3;\n# else\n#if 1\n /* this method is probably faster,\n * but adds a 128 bytes lookup table */\n static const unsigned char ctz7_tab[128] = {\n 7, 0, 1, 0, 2, 
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,\n };\n U64 const mask = 0x0101010101010101ULL;\n U64 const t = (((val >> 8) - mask) | val) & mask;\n return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];\n#else\n /* this method doesn't consume memory space like the previous one,\n * but it contains several branches,\n * that may end up slowing execution */\n static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.\n Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.\n Note that this code path is never triggered in 32-bits mode. */\n unsigned r;\n if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }\n if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }\n r += (!val);\n return r;\n#endif\n# endif\n } else /* 32 bits */ {\n# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \\\n ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \\\n !defined(LZ4_FORCE_SW_BITCOUNT)\n return (unsigned)__builtin_clz((U32)val) >> 3;\n# else\n val >>= 8;\n val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |\n (val + 0x00FF0000)) >> 24;\n return (unsigned)val ^ 3;\n# endif\n }\n }\n}\n\n\n#define STEPSIZE sizeof(reg_t)\nLZ4_FORCE_INLINE\nunsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)\n{\n const BYTE* const pStart = pIn;\n\n if (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) {\n pIn+=STEPSIZE; pMatch+=STEPSIZE;\n } else {\n return LZ4_NbCommonBytes(diff);\n } }\n\n while (likely(pIn < pInLimit-(STEPSIZE-1))) {\n reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);\n if (!diff) { 
pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }\n pIn += LZ4_NbCommonBytes(diff);\n return (unsigned)(pIn - pStart);\n }\n\n if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }\n if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }\n if ((pIn compression run slower on incompressible data */\n\n\n/*-************************************\n* Local Structures and types\n**************************************/\ntypedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;\n\n/**\n * This enum distinguishes several different modes of accessing previous\n * content in the stream.\n *\n * - noDict : There is no preceding content.\n * - withPrefix64k : Table entries up to ctx->dictSize before the current blob\n * blob being compressed are valid and refer to the preceding\n * content (of length ctx->dictSize), which is available\n * contiguously preceding in memory the content currently\n * being compressed.\n * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere\n * else in memory, starting at ctx->dictionary with length\n * ctx->dictSize.\n * - usingDictCtx : Everything concerning the preceding content is\n * in a separate context, pointed to by ctx->dictCtx.\n * ctx->dictionary, ctx->dictSize, and table entries\n * in the current context that refer to positions\n * preceding the beginning of the current compression are\n * ignored. 
Instead, ctx->dictCtx->dictionary and ctx->dictCtx\n * ->dictSize describe the location and size of the preceding\n * content, and matches are found by looking in the ctx\n * ->dictCtx->hashTable.\n */\ntypedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;\ntypedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;\n\n\n/*-************************************\n* Local Utils\n**************************************/\nint LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }\nconst char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }\nint LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }\nint LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }\n\n\n/*-****************************************\n* Internal Definitions, used only in Tests\n*******************************************/\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);\n\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize);\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize);\n#if defined (__cplusplus)\n}\n#endif\n\n/*-******************************\n* Compression functions\n********************************/\nLZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)\n{\n if (tableType == byU16)\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));\n else\n return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)\n{\n const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG+1 : LZ4_HASHLOG;\n if (LZ4_isLittleEndian()) {\n const U64 prime5bytes = 889523592379ULL;\n return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));\n } else {\n const U64 prime8bytes = 11400714785074694791ULL;\n return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));\n }\n}\n\nLZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)\n{\n if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);\n\n#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT\n return LZ4_hash4(LZ4_readLE32(p), tableType);\n#else\n return LZ4_hash4(LZ4_read32(p), tableType);\n#endif\n}\n\nLZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: { /* illegal! */ assert(0); return; }\n case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }\n }\n}\n\nLZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)\n{\n switch (tableType)\n {\n default: /* fallthrough */\n case clearedTable: /* fallthrough */\n case byPtr: { /* illegal! 
*/ assert(0); return; }\n case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }\n case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }\n }\n}\n\n/* LZ4_putPosition*() : only used in byPtr mode */\nLZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,\n void* tableBase, tableType_t const tableType)\n{\n const BYTE** const hashTable = (const BYTE**)tableBase;\n assert(tableType == byPtr); (void)tableType;\n hashTable[h] = p;\n}\n\nLZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n LZ4_putPositionOnHash(p, h, tableBase, tableType);\n}\n\n/* LZ4_getIndexOnHash() :\n * Index of match position registered in hash table.\n * hash position must be calculated by using base+index, or dictBase+index.\n * Assumption 1 : only valid if tableType == byU32 or byU16.\n * Assumption 2 : h is presumed valid (within limits of hash table)\n */\nLZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);\n if (tableType == byU32) {\n const U32* const hashTable = (const U32*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-2)));\n return hashTable[h];\n }\n if (tableType == byU16) {\n const U16* const hashTable = (const U16*) tableBase;\n assert(h < (1U << (LZ4_MEMORY_USAGE-1)));\n return hashTable[h];\n }\n assert(0); return 0; /* forbidden case */\n}\n\nstatic const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)\n{\n assert(tableType == byPtr); (void)tableType;\n { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }\n}\n\nLZ4_FORCE_INLINE const BYTE*\nLZ4_getPosition(const BYTE* p,\n const void* tableBase, tableType_t tableType)\n{\n U32 const h = LZ4_hashPosition(p, tableType);\n return LZ4_getPositionOnHash(h, tableBase, tableType);\n}\n\nLZ4_FORCE_INLINE 
void\nLZ4_prepareTable(LZ4_stream_t_internal* const cctx,\n const int inputSize,\n const tableType_t tableType) {\n /* If the table hasn't been used, it's guaranteed to be zeroed out, and is\n * therefore safe to use no matter what mode we're in. Otherwise, we figure\n * out if it's safe to leave as is or whether it needs to be reset.\n */\n if ((tableType_t)cctx->tableType != clearedTable) {\n assert(inputSize >= 0);\n if ((tableType_t)cctx->tableType != tableType\n || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)\n || ((tableType == byU32) && cctx->currentOffset > 1 GB)\n || tableType == byPtr\n || inputSize >= 4 KB)\n {\n DEBUGLOG(4, \"LZ4_prepareTable: Resetting table in %p\", cctx);\n MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);\n cctx->currentOffset = 0;\n cctx->tableType = (U32)clearedTable;\n } else {\n DEBUGLOG(4, \"LZ4_prepareTable: Re-use hash table (no reset)\");\n }\n }\n\n /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,\n * is faster than compressing without a gap.\n * However, compressing with currentOffset == 0 is faster still,\n * so we preserve that case.\n */\n if (cctx->currentOffset != 0 && tableType == byU32) {\n DEBUGLOG(5, \"LZ4_prepareTable: adding 64KB to currentOffset\");\n cctx->currentOffset += 64 KB;\n }\n\n /* Finally, clear history */\n cctx->dictCtx = NULL;\n cctx->dictionary = NULL;\n cctx->dictSize = 0;\n}\n\n/** LZ4_compress_generic_validated() :\n * inlined, to ensure branches are decided at compilation time.\n * The following conditions are presumed already validated:\n * - source != NULL\n * - inputSize > 0\n */\nLZ4_FORCE_INLINE int LZ4_compress_generic_validated(\n LZ4_stream_t_internal* const cctx,\n const char* const source,\n char* const dest,\n const int inputSize,\n int* inputConsumed, /* only written when outputDirective == fillOutput */\n const int maxOutputSize,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const 
dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n int result;\n const BYTE* ip = (const BYTE*)source;\n\n U32 const startIndex = cctx->currentOffset;\n const BYTE* base = (const BYTE*)source - startIndex;\n const BYTE* lowLimit;\n\n const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;\n const BYTE* const dictionary =\n dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;\n const U32 dictSize =\n dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;\n const U32 dictDelta =\n (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */\n\n int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);\n U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */\n const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;\n const BYTE* anchor = (const BYTE*) source;\n const BYTE* const iend = ip + inputSize;\n const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;\n const BYTE* const matchlimit = iend - LASTLITERALS;\n\n /* the dictCtx currentOffset is indexed on the start of the dictionary,\n * while a dictionary in the current context precedes the currentOffset */\n const BYTE* dictBase = (dictionary == NULL) ? NULL :\n (dictDirective == usingDictCtx) ?\n dictionary + dictSize - dictCtx->currentOffset :\n dictionary + dictSize - startIndex;\n\n BYTE* op = (BYTE*) dest;\n BYTE* const olimit = op + maxOutputSize;\n\n U32 offset = 0;\n U32 forwardH;\n\n DEBUGLOG(5, \"LZ4_compress_generic_validated: srcSize=%i, tableType=%u\", inputSize, tableType);\n assert(ip != NULL);\n if (tableType == byU16) assert(inputSize= 1);\n\n lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);\n\n /* Update context state */\n if (dictDirective == usingDictCtx) {\n /* Subsequent linked blocks can't use the dictionary. */\n /* Instead, they use the block we just compressed. */\n cctx->dictCtx = NULL;\n cctx->dictSize = (U32)inputSize;\n } else {\n cctx->dictSize += (U32)inputSize;\n }\n cctx->currentOffset += (U32)inputSize;\n cctx->tableType = (U32)tableType;\n\n if (inputSizehashTable, byPtr);\n } else {\n LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);\n } }\n ip++; forwardH = LZ4_hashPosition(ip, tableType);\n\n /* Main Loop */\n for ( ; ; ) {\n const BYTE* match;\n BYTE* token;\n const BYTE* filledIp;\n\n /* Find a match */\n if (tableType == byPtr) {\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);\n\n } while ( (match+LZ4_DISTANCE_MAX < ip)\n || (LZ4_read32(match) != LZ4_read32(ip)) );\n\n } else { /* byU32, byU16 */\n\n const BYTE* forwardIp = ip;\n int step = 1;\n int searchMatchNb = acceleration << LZ4_skipTrigger;\n do {\n U32 const h = forwardH;\n U32 const current = (U32)(forwardIp - base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex <= current);\n assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));\n ip = forwardIp;\n forwardIp += step;\n step = (searchMatchNb++ >> LZ4_skipTrigger);\n\n if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;\n assert(ip < mflimitPlusOne);\n\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n 
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n matchIndex += dictDelta; /* make dictCtx index comparable with current context */\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else if (dictDirective == usingExtDict) {\n if (matchIndex < startIndex) {\n DEBUGLOG(7, \"extDict candidate: matchIndex=%5u < startIndex=%5u\", matchIndex, startIndex);\n assert(startIndex - matchIndex >= MINMATCH);\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source;\n }\n } else { /* single continuous memory segment */\n match = base + matchIndex;\n }\n forwardH = LZ4_hashPosition(forwardIp, tableType);\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n\n DEBUGLOG(7, \"candidate at pos=%u (offset=%u \\n\", matchIndex, current - matchIndex);\n if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */\n assert(matchIndex < current);\n if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))\n && (matchIndex+LZ4_DISTANCE_MAX < current)) {\n continue;\n } /* too far */\n assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */\n\n if (LZ4_read32(match) == LZ4_read32(ip)) {\n if (maybe_extMem) offset = current - matchIndex;\n break; /* match found */\n }\n\n } while(1);\n }\n\n /* Catch up */\n filledIp = ip;\n assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */\n if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {\n do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));\n }\n\n /* Encode Literals */\n { unsigned const litLength = (unsigned)(ip - anchor);\n token = op++;\n if ((outputDirective == limitedOutput) && /* Check output buffer overflow */\n (unlikely(op + 
litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n if ((outputDirective == fillOutput) &&\n (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {\n op--;\n goto _last_literals;\n }\n if (litLength >= RUN_MASK) {\n unsigned len = litLength - RUN_MASK;\n *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255;\n *op++ = (BYTE)len;\n }\n else *token = (BYTE)(litLength< olimit)) {\n /* the match was too close to the end, rewind and go to last literals */\n op = token;\n goto _last_literals;\n }\n\n /* Encode Offset */\n if (maybe_extMem) { /* static test */\n DEBUGLOG(6, \" with offset=%u (ext if > %i)\", offset, (int)(ip - (const BYTE*)source));\n assert(offset <= LZ4_DISTANCE_MAX && offset > 0);\n LZ4_writeLE16(op, (U16)offset); op+=2;\n } else {\n DEBUGLOG(6, \" with offset=%u (same segment)\", (U32)(ip - match));\n assert(ip-match <= LZ4_DISTANCE_MAX);\n LZ4_writeLE16(op, (U16)(ip - match)); op+=2;\n }\n\n /* Encode MatchLength */\n { unsigned matchCode;\n\n if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)\n && (lowLimit==dictionary) /* match within extDict */ ) {\n const BYTE* limit = ip + (dictEnd-match);\n assert(dictEnd > match);\n if (limit > matchlimit) limit = matchlimit;\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);\n ip += (size_t)matchCode + MINMATCH;\n if (ip==limit) {\n unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);\n matchCode += more;\n ip += more;\n }\n DEBUGLOG(6, \" with matchLength=%u starting in extDict\", matchCode+MINMATCH);\n } else {\n matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);\n ip += (size_t)matchCode + MINMATCH;\n DEBUGLOG(6, \" with matchLength=%u\", matchCode+MINMATCH);\n }\n\n if 
((outputDirective) && /* Check output buffer overflow */\n (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {\n if (outputDirective == fillOutput) {\n /* Match description too long : reduce it */\n U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;\n ip -= matchCode - newMatchCode;\n assert(newMatchCode < matchCode);\n matchCode = newMatchCode;\n if (unlikely(ip <= filledIp)) {\n /* We have already filled up to filledIp so if ip ends up less than filledIp\n * we have positions in the hash table beyond the current position. This is\n * a problem if we reuse the hash table. So we have to remove these positions\n * from the hash table.\n */\n const BYTE* ptr;\n DEBUGLOG(5, \"Clearing %u positions\", (U32)(filledIp - ip));\n for (ptr = ip; ptr <= filledIp; ++ptr) {\n U32 const h = LZ4_hashPosition(ptr, tableType);\n LZ4_clearHash(h, cctx->hashTable, tableType);\n }\n }\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */\n }\n }\n if (matchCode >= ML_MASK) {\n *token += ML_MASK;\n matchCode -= ML_MASK;\n LZ4_write32(op, 0xFFFFFFFF);\n while (matchCode >= 4*255) {\n op+=4;\n LZ4_write32(op, 0xFFFFFFFF);\n matchCode -= 4*255;\n }\n op += matchCode / 255;\n *op++ = (BYTE)(matchCode % 255);\n } else\n *token += (BYTE)(matchCode);\n }\n /* Ensure we have enough space for the last literals. 
*/\n assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));\n\n anchor = ip;\n\n /* Test end of chunk */\n if (ip >= mflimitPlusOne) break;\n\n /* Fill table */\n { U32 const h = LZ4_hashPosition(ip-2, tableType);\n if (tableType == byPtr) {\n LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);\n } else {\n U32 const idx = (U32)((ip-2) - base);\n LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);\n } }\n\n /* Test next position */\n if (tableType == byPtr) {\n\n match = LZ4_getPosition(ip, cctx->hashTable, tableType);\n LZ4_putPosition(ip, cctx->hashTable, tableType);\n if ( (match+LZ4_DISTANCE_MAX >= ip)\n && (LZ4_read32(match) == LZ4_read32(ip)) )\n { token=op++; *token=0; goto _next_match; }\n\n } else { /* byU32, byU16 */\n\n U32 const h = LZ4_hashPosition(ip, tableType);\n U32 const current = (U32)(ip-base);\n U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if (dictDirective == usingDictCtx) {\n if (matchIndex < startIndex) {\n /* there was no match, try the dictionary */\n assert(tableType == byU32);\n matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n matchIndex += dictDelta;\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else if (dictDirective==usingExtDict) {\n if (matchIndex < startIndex) {\n assert(dictBase);\n match = dictBase + matchIndex;\n lowLimit = dictionary; /* required for match length counter */\n } else {\n match = base + matchIndex;\n lowLimit = (const BYTE*)source; /* required for match length counter */\n }\n } else { /* single memory segment */\n match = base + matchIndex;\n }\n LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);\n assert(matchIndex < current);\n if ( ((dictIssue==dictSmall) ? 
(matchIndex >= prefixIdxLimit) : 1)\n && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))\n && (LZ4_read32(match) == LZ4_read32(ip)) ) {\n token=op++;\n *token=0;\n if (maybe_extMem) offset = current - matchIndex;\n DEBUGLOG(6, \"seq.start:%i, literals=%u, match.start:%i\",\n (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));\n goto _next_match;\n }\n }\n\n /* Prepare next loop */\n forwardH = LZ4_hashPosition(++ip, tableType);\n\n }\n\n_last_literals:\n /* Encode Last Literals */\n { size_t lastRun = (size_t)(iend - anchor);\n if ( (outputDirective) && /* Check output buffer overflow */\n (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {\n if (outputDirective == fillOutput) {\n /* adapt lastRun to fill 'dst' */\n assert(olimit >= op);\n lastRun = (size_t)(olimit-op) - 1/*token*/;\n lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/\n } else {\n assert(outputDirective == limitedOutput);\n return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */\n }\n }\n DEBUGLOG(6, \"Final literal run : %i literals\", (int)lastRun);\n if (lastRun >= RUN_MASK) {\n size_t accumulator = lastRun - RUN_MASK;\n *op++ = RUN_MASK << ML_BITS;\n for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;\n *op++ = (BYTE) accumulator;\n } else {\n *op++ = (BYTE)(lastRun< 0);\n DEBUGLOG(5, \"LZ4_compress_generic: compressed %i bytes into %i bytes\", inputSize, result);\n return result;\n}\n\n/** LZ4_compress_generic() :\n * inlined, to ensure branches are decided at compilation time;\n * takes care of src == (NULL, 0)\n * and forward the rest to LZ4_compress_generic_validated */\nLZ4_FORCE_INLINE int LZ4_compress_generic(\n LZ4_stream_t_internal* const cctx,\n const char* const src,\n char* const dst,\n const int srcSize,\n int *inputConsumed, /* only written when outputDirective == fillOutput */\n const int dstCapacity,\n const limitedOutput_directive outputDirective,\n const tableType_t tableType,\n const dict_directive dictDirective,\n const dictIssue_directive dictIssue,\n const int acceleration)\n{\n DEBUGLOG(5, \"LZ4_compress_generic: srcSize=%i, dstCapacity=%i\",\n srcSize, dstCapacity);\n\n if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */\n if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */\n if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */\n DEBUGLOG(5, \"Generating an empty block\");\n assert(outputDirective == notLimited || dstCapacity >= 1);\n assert(dst != NULL);\n dst[0] = 0;\n if (outputDirective == fillOutput) {\n assert (inputConsumed != NULL);\n *inputConsumed = 0;\n }\n return 1;\n }\n assert(src != NULL);\n\n return LZ4_compress_generic_validated(cctx, src, dst, srcSize,\n inputConsumed, /* only written into if outputDirective == fillOutput */\n dstCapacity, outputDirective,\n tableType, dictDirective, dictIssue, 
acceleration);\n}\n\n\nint LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;\n assert(ctx != NULL);\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n if (maxOutputSize >= LZ4_compressBound(inputSize)) {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (inputSize < LZ4_64Klimit) {\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n/**\n * LZ4_compress_fast_extState_fastReset() :\n * A variant of LZ4_compress_fast_extState().\n *\n * Using this variant avoids an expensive initialization step. 
It is only safe\n * to call if the state buffer is known to be correctly initialized already\n * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of\n * \"correctly initialized\").\n */\nint LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)\n{\n LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n assert(ctx != NULL);\n\n if (dstCapacity >= LZ4_compressBound(srcSize)) {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n if (srcSize < LZ4_64Klimit) {\n const tableType_t tableType = byU16;\n LZ4_prepareTable(ctx, srcSize, tableType);\n if (ctx->currentOffset) {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);\n } else {\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n } else {\n const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);\n }\n }\n}\n\n\nint LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)\n{\n int result;\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctxPtr == NULL) return 0;\n#else\n LZ4_stream_t ctx;\n LZ4_stream_t* const ctxPtr = &ctx;\n#endif\n result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctxPtr);\n#endif\n return result;\n}\n\n\nint LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);\n}\n\n\n/* Note!: This function leaves the stream in an unclean/broken state!\n * It is not safe to subsequently use the same state with a _fastReset() or\n * _continue() call without resetting it. */\nstatic int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n void* const s = LZ4_initStream(state, sizeof (*state));\n assert(s != NULL); (void)s;\n\n if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */\n return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);\n } else {\n if (*srcSizePtr < LZ4_64Klimit) {\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);\n } else {\n tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32;\n return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);\n } }\n}\n\nint LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)\n{\n int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);\n /* clean the state on exit */\n LZ4_initStream(state, sizeof (LZ4_stream_t));\n return r;\n}\n\n\nint LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)\n{\n#if (LZ4_HEAPMODE)\n LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */\n if (ctx == NULL) return 0;\n#else\n LZ4_stream_t ctxBody;\n LZ4_stream_t* const ctx = &ctxBody;\n#endif\n\n int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);\n\n#if (LZ4_HEAPMODE)\n FREEMEM(ctx);\n#endif\n return result;\n}\n\n\n\n/*-******************************\n* Streaming functions\n********************************/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_stream_t* LZ4_createStream(void)\n{\n LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));\n LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));\n DEBUGLOG(4, \"LZ4_createStream %p\", lz4s);\n if (lz4s == NULL) return NULL;\n LZ4_initStream(lz4s, sizeof(*lz4s));\n return lz4s;\n}\n#endif\n\nstatic size_t LZ4_stream_t_alignment(void)\n{\n#if LZ4_ALIGN_TEST\n typedef struct { char c; LZ4_stream_t t; } t_a;\n return sizeof(t_a) - sizeof(LZ4_stream_t);\n#else\n return 1; /* effectively disabled */\n#endif\n}\n\nLZ4_stream_t* LZ4_initStream (void* buffer, size_t size)\n{\n DEBUGLOG(5, \"LZ4_initStream\");\n if (buffer == NULL) { return NULL; }\n if (size < sizeof(LZ4_stream_t)) { return NULL; }\n if (!LZ4_isAligned(buffer, 
LZ4_stream_t_alignment())) return NULL;\n MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));\n return (LZ4_stream_t*)buffer;\n}\n\n/* resetStream is now deprecated,\n * prefer initStream() which is more general */\nvoid LZ4_resetStream (LZ4_stream_t* LZ4_stream)\n{\n DEBUGLOG(5, \"LZ4_resetStream (ctx:%p)\", LZ4_stream);\n MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));\n}\n\nvoid LZ4_resetStream_fast(LZ4_stream_t* ctx) {\n LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nint LZ4_freeStream (LZ4_stream_t* LZ4_stream)\n{\n if (!LZ4_stream) return 0; /* support free on NULL */\n DEBUGLOG(5, \"LZ4_freeStream %p\", LZ4_stream);\n FREEMEM(LZ4_stream);\n return (0);\n}\n#endif\n\n\ntypedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;\n#define HASH_UNIT sizeof(reg_t)\nint LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,\n const char* dictionary, int dictSize,\n LoadDict_mode_e _ld)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n const tableType_t tableType = byU32;\n const BYTE* p = (const BYTE*)dictionary;\n const BYTE* const dictEnd = p + dictSize;\n U32 idx32;\n\n DEBUGLOG(4, \"LZ4_loadDict (%i bytes from %p into %p)\", dictSize, dictionary, LZ4_dict);\n\n /* It's necessary to reset the context,\n * and not just continue it with prepareTable()\n * to avoid any risk of generating overflowing matchIndex\n * when compressing using this dictionary */\n LZ4_resetStream(LZ4_dict);\n\n /* We always increment the offset by 64 KB, since, if the dict is longer,\n * we truncate it to the last 64k, and if it's shorter, we still want to\n * advance by a whole window length so we can provide the guarantee that\n * there are only valid offsets in the window, which allows an optimization\n * in LZ4_compress_fast_continue() where it uses noDictIssue even when the\n * dictionary isn't a full 64k. 
*/\n dict->currentOffset += 64 KB;\n\n if (dictSize < (int)HASH_UNIT) {\n return 0;\n }\n\n if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;\n dict->dictionary = p;\n dict->dictSize = (U32)(dictEnd - p);\n dict->tableType = (U32)tableType;\n idx32 = dict->currentOffset - dict->dictSize;\n\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n /* Note: overwriting => favors positions end of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n p+=3; idx32+=3;\n }\n\n if (_ld == _ld_slow) {\n /* Fill hash table with additional references, to improve compression capability */\n p = dict->dictionary;\n idx32 = dict->currentOffset - dict->dictSize;\n while (p <= dictEnd-HASH_UNIT) {\n U32 const h = LZ4_hashPosition(p, tableType);\n U32 const limit = dict->currentOffset - 64 KB;\n if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {\n /* Note: not overwriting => favors positions beginning of dictionary */\n LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);\n }\n p++; idx32++;\n }\n }\n\n return (int)dict->dictSize;\n}\n\nint LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);\n}\n\nint LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)\n{\n return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);\n}\n\nvoid LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)\n{\n const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :\n &(dictionaryStream->internal_donotuse);\n\n DEBUGLOG(4, \"LZ4_attach_dictionary (%p, %p, size %u)\",\n workingStream, dictionaryStream,\n dictCtx != NULL ? dictCtx->dictSize : 0);\n\n if (dictCtx != NULL) {\n /* If the current offset is zero, we will never look in the\n * external dictionary context, since there is no value a table\n * entry can take that indicate a miss. 
In that case, we need\n * to bump the offset to something non-zero.\n */\n if (workingStream->internal_donotuse.currentOffset == 0) {\n workingStream->internal_donotuse.currentOffset = 64 KB;\n }\n\n /* Don't actually attach an empty dictionary.\n */\n if (dictCtx->dictSize == 0) {\n dictCtx = NULL;\n }\n }\n workingStream->internal_donotuse.dictCtx = dictCtx;\n}\n\n\nstatic void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)\n{\n assert(nextSize >= 0);\n if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */\n /* rescale hash table */\n U32 const delta = LZ4_dict->currentOffset - 64 KB;\n const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;\n int i;\n DEBUGLOG(4, \"LZ4_renormDictT\");\n for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0;\n else LZ4_dict->hashTable[i] -= delta;\n }\n LZ4_dict->currentOffset = 64 KB;\n if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;\n LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;\n }\n}\n\n\nint LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,\n const char* source, char* dest,\n int inputSize, int maxOutputSize,\n int acceleration)\n{\n const tableType_t tableType = byU32;\n LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;\n const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;\n\n DEBUGLOG(5, \"LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)\", inputSize, streamPtr->dictSize);\n\n LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */\n if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;\n if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;\n\n /* invalidate tiny dictionaries */\n if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */\n && (dictEnd != source) /* prefix mode */\n && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */\n && (streamPtr->dictCtx == NULL) /* usingDictCtx */\n ) {\n DEBUGLOG(5, \"LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small\", streamPtr->dictSize, streamPtr->dictionary);\n /* remove dictionary existence from history, to employ faster prefix mode */\n streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)source;\n dictEnd = source;\n }\n\n /* Check overlapping input/dictionary space */\n { const char* const sourceEnd = source + inputSize;\n if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {\n streamPtr->dictSize = (U32)(dictEnd - sourceEnd);\n if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;\n if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;\n streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;\n }\n }\n\n /* prefix mode : source data follows dictionary */\n if (dictEnd == source) {\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);\n else\n return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);\n }\n\n /* external dictionary mode */\n { int 
result;\n if (streamPtr->dictCtx) {\n /* We depend here on the fact that dictCtx'es (produced by\n * LZ4_loadDict) guarantee that their tables contain no references\n * to offsets between dictCtx->currentOffset - 64 KB and\n * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe\n * to use noDictIssue even when the dict isn't a full 64 KB.\n */\n if (inputSize > 4 KB) {\n /* For compressing large blobs, it is faster to pay the setup\n * cost to copy the dictionary's tables into the active context,\n * so that the compression loop is only looking into one table.\n */\n LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);\n }\n } else { /* small data <= 4 KB */\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);\n }\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)inputSize;\n return result;\n }\n}\n\n\n/* Hidden debug function, to force-test external dictionary mode */\n", "suffix_code": "\n\n\n/*! 
LZ4_saveDict() :\n * If previously compressed data block is not guaranteed to remain available at its memory location,\n * save it into a safer place (char* safeBuffer).\n * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,\n * one can therefore call LZ4_compress_fast_continue() right after.\n * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.\n */\nint LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)\n{\n LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;\n\n DEBUGLOG(5, \"LZ4_saveDict : dictSize=%i, safeBuffer=%p\", dictSize, safeBuffer);\n\n if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */\n if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }\n\n if (safeBuffer == NULL) assert(dictSize == 0);\n if (dictSize > 0) {\n const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;\n assert(dict->dictionary);\n LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);\n }\n\n dict->dictionary = (const BYTE*)safeBuffer;\n dict->dictSize = (U32)dictSize;\n\n return dictSize;\n}\n\n\n\n/*-*******************************\n * Decompression functions\n ********************************/\n\ntypedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;\n\n#undef MIN\n#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) )\n\n\n/* variant for decompress_unsafe()\n * does not know end of input\n * presumes input is well formed\n * note : will consume at least one byte */\nstatic size_t read_long_length_no_check(const BYTE** pp)\n{\n size_t b, l = 0;\n do { b = **pp; (*pp)++; l += b; } while (b==255);\n DEBUGLOG(6, \"read_long_length_no_check: +length=%zu using %zu input bytes\", l, l/255 + 1)\n return l;\n}\n\n/* core decoder variant for LZ4_decompress_fast*()\n * for legacy support only : these entry points are deprecated.\n * - Presumes input is correctly formed (no defense vs malformed inputs)\n * - Does not know input size (presume input buffer is \"large enough\")\n * - Decompress a full block (only)\n * @return : nb of bytes read from input.\n * Note : this variant is not optimized for speed, just for maintenance.\n * the goal is to remove support of decompress_fast*() variants by v2.0\n**/\nLZ4_FORCE_INLINE int\nLZ4_decompress_unsafe_generic(\n const BYTE* const istart,\n BYTE* const ostart,\n int decompressedSize,\n\n size_t prefixSize,\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note: =0 if dictStart==NULL */\n )\n{\n const BYTE* ip = istart;\n BYTE* op = (BYTE*)ostart;\n BYTE* const oend = ostart + decompressedSize;\n const BYTE* const prefixStart = ostart - prefixSize;\n\n DEBUGLOG(5, \"LZ4_decompress_unsafe_generic\");\n if (dictStart == NULL) assert(dictSize == 0);\n\n while (1) {\n /* start new sequence */\n unsigned token = *ip++;\n\n /* literals */\n { size_t ll = token >> ML_BITS;\n if (ll==15) {\n /* long literal length */\n ll += read_long_length_no_check(&ip);\n }\n if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */\n LZ4_memmove(op, ip, ll); /* support in-place decompression */\n op += ll;\n ip += ll;\n if ((size_t)(oend-op) < MFLIMIT) {\n if (op==oend) break; /* end of block */\n DEBUGLOG(5, \"invalid: literals end at distance %zi from end of block\", oend-op);\n /* incorrect end of block 
:\n * last match must start at least MFLIMIT==12 bytes before end of output block */\n return -1;\n } }\n\n /* match */\n { size_t ml = token & 15;\n size_t const offset = LZ4_readLE16(ip);\n ip+=2;\n\n if (ml==15) {\n /* long literal length */\n ml += read_long_length_no_check(&ip);\n }\n ml += MINMATCH;\n\n if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */\n\n { const BYTE* match = op - offset;\n\n /* out of range */\n if (offset > (size_t)(op - prefixStart) + dictSize) {\n DEBUGLOG(6, \"offset out of range\");\n return -1;\n }\n\n /* check special case : extDict */\n if (offset > (size_t)(op - prefixStart)) {\n /* extDict scenario */\n const BYTE* const dictEnd = dictStart + dictSize;\n const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));\n size_t const extml = (size_t)(dictEnd - extMatch);\n if (extml > ml) {\n /* match entirely within extDict */\n LZ4_memmove(op, extMatch, ml);\n op += ml;\n ml = 0;\n } else {\n /* match split between extDict & prefix */\n LZ4_memmove(op, extMatch, extml);\n op += extml;\n ml -= extml;\n }\n match = prefixStart;\n }\n\n /* match copy - slow variant, supporting overlap copy */\n { size_t u;\n for (u=0; u= ipmax before start of loop. Returns initial_error if so.\n * @error (output) - error code. 
Must be set to 0 before call.\n**/\ntypedef size_t Rvl_t;\nstatic const Rvl_t rvl_error = (Rvl_t)(-1);\nLZ4_FORCE_INLINE Rvl_t\nread_variable_length(const BYTE** ip, const BYTE* ilimit,\n int initial_check)\n{\n Rvl_t s, length = 0;\n assert(ip != NULL);\n assert(*ip != NULL);\n assert(ilimit != NULL);\n if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */\n return rvl_error;\n }\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n if (likely(s != 255)) return length;\n do {\n s = **ip;\n (*ip)++;\n length += s;\n if (unlikely((*ip) > ilimit)) { /* read limit reached */\n return rvl_error;\n }\n /* accumulator overflow detection (32-bit mode only) */\n if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {\n return rvl_error;\n }\n } while (s == 255);\n\n return length;\n}\n\n/*! 
LZ4_decompress_generic() :\n * This generic decompression function covers all use cases.\n * It shall be instantiated several times, using different sets of directives.\n * Note that it is important for performance that this function really get inlined,\n * in order to remove useless branches during compilation optimization.\n */\nLZ4_FORCE_INLINE int\nLZ4_decompress_generic(\n const char* const src,\n char* const dst,\n int srcSize,\n int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */\n\n earlyEnd_directive partialDecoding, /* full, partial */\n dict_directive dict, /* noDict, withPrefix64k, usingExtDict */\n const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */\n const BYTE* const dictStart, /* only if dict==usingExtDict */\n const size_t dictSize /* note : = 0 if noDict */\n )\n{\n if ((src == NULL) || (outputSize < 0)) { return -1; }\n\n { const BYTE* ip = (const BYTE*) src;\n const BYTE* const iend = ip + srcSize;\n\n BYTE* op = (BYTE*) dst;\n BYTE* const oend = op + outputSize;\n BYTE* cpy;\n\n const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;\n\n const int checkOffset = (dictSize < (int)(64 KB));\n\n\n /* Set up the \"end\" pointers for the shortcut. */\n const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;\n const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;\n\n const BYTE* match;\n size_t offset;\n unsigned token;\n size_t length;\n\n\n DEBUGLOG(5, \"LZ4_decompress_generic (srcSize:%i, dstSize:%i)\", srcSize, outputSize);\n\n /* Special cases */\n assert(lowPrefix <= op);\n if (unlikely(outputSize==0)) {\n /* Empty output buffer */\n if (partialDecoding) return 0;\n return ((srcSize==1) && (*ip==0)) ? 
0 : -1;\n }\n if (unlikely(srcSize==0)) { return -1; }\n\n /* LZ4_FAST_DEC_LOOP:\n * designed for modern OoO performance cpus,\n * where copying reliably 32-bytes is preferable to an unpredictable branch.\n * note : fast loop may show a regression for some client arm chips. */\n#if LZ4_FAST_DEC_LOOP\n if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(6, \"move to safe decode loop\");\n goto safe_decode;\n }\n\n /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using fast decode loop\");\n while (1) {\n /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */\n assert(oend - op >= FASTLOOP_SAFE_DISTANCE);\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) {\n DEBUGLOG(6, \"error reading long literal length\");\n goto _output_error;\n }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n\n /* copy literals */\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }\n LZ4_wildCopy32(op, ip, op+length);\n ip += length; op += length;\n } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {\n /* We don't need to check oend, since we check it once for each loop below */\n DEBUGLOG(7, \"copy %u bytes in a 16-bytes stripe\", (unsigned)length);\n /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */\n LZ4_memcpy(op, ip, 16);\n ip += length; op += length;\n } else {\n goto safe_literal_copy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n DEBUGLOG(6, \"blockPos%6u: offset = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)offset);\n match = op - offset;\n assert(match <= op); /* overflow check */\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \" match length token = %u (len==%u)\", (unsigned)length, (unsigned)length+MINMATCH);\n\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) {\n DEBUGLOG(5, \"error reading long match length\");\n goto _output_error;\n }\n length += addl;\n length += MINMATCH;\n DEBUGLOG(7, \" long match length == %u\", (unsigned)length);\n if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n goto safe_match_copy;\n }\n } else {\n length += MINMATCH;\n if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {\n DEBUGLOG(7, \"moving to safe_match_copy (ml==%u)\", (unsigned)length);\n goto safe_match_copy;\n }\n\n /* Fastpath check: skip LZ4_wildCopy32 when true */\n if ((dict == withPrefix64k) || (match >= lowPrefix)) {\n if (offset >= 8) {\n assert(match >= lowPrefix);\n assert(match <= op);\n assert(op + 18 <= oend);\n\n LZ4_memcpy(op, match, 8);\n LZ4_memcpy(op+8, match+8, 8);\n LZ4_memcpy(op+16, match+16, 2);\n op += length;\n continue;\n } } }\n\n if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {\n DEBUGLOG(5, \"Error : pos=%zi, offset=%zi => outside buffers\", op-lowPrefix, op-match);\n goto _output_error;\n }\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) {\n DEBUGLOG(7, \"partialDecoding: dictionary match, close to dstEnd\");\n length = MIN(length, (size_t)(oend-op));\n } else {\n DEBUGLOG(6, \"end-of-block condition violated\")\n goto _output_error;\n } }\n\n if (length <= 
(size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) { *op++ = *copyFrom++; }\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n\n /* copy match within block */\n cpy = op + length;\n\n assert((op <= oend) && (oend-op >= 32));\n if (unlikely(offset<16)) {\n LZ4_memcpy_using_offset(op, match, cpy, offset);\n } else {\n LZ4_wildCopy32(op, match, cpy);\n }\n\n op = cpy; /* wildcopy correction */\n }\n safe_decode:\n#endif\n\n /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */\n DEBUGLOG(6, \"using safe decode loop\");\n while (1) {\n assert(ip < iend);\n token = *ip++;\n length = token >> ML_BITS; /* literal length */\n DEBUGLOG(7, \"blockPos%6u: litLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n /* A two-stage shortcut for the most common case:\n * 1) If the literal length is 0..14, and there is enough space,\n * enter the shortcut and copy 16 bytes on behalf of the literals\n * (in the fast mode, only 8 bytes can be safely copied this way).\n * 2) Further if the match length is 4..18, copy 18 bytes in a similar\n * manner; but we ensure that there's enough space in the output for\n * those 18 bytes earlier, upon entering the shortcut (in other words,\n * there is a combined check for both stages).\n */\n if ( (length != RUN_MASK)\n /* strictly \"less than\" on input, to re-enter the loop with at least one byte */\n && likely((ip < shortiend) & (op <= 
shortoend)) ) {\n /* Copy the literals */\n LZ4_memcpy(op, ip, 16);\n op += length; ip += length;\n\n /* The second stage: prepare for match copying, decode full info.\n * If it doesn't work out, the info won't be wasted. */\n length = token & ML_MASK; /* match length */\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u (len=%u)\", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);\n offset = LZ4_readLE16(ip); ip += 2;\n match = op - offset;\n assert(match <= op); /* check overflow */\n\n /* Do not deal with overlapping matches. */\n if ( (length != ML_MASK)\n && (offset >= 8)\n && (dict==withPrefix64k || match >= lowPrefix) ) {\n /* Copy the match. */\n LZ4_memcpy(op + 0, match + 0, 8);\n LZ4_memcpy(op + 8, match + 8, 8);\n LZ4_memcpy(op +16, match +16, 2);\n op += length + MINMATCH;\n /* Both stages worked, load the next token. */\n continue;\n }\n\n /* The second stage didn't work out, but the info is ready.\n * Propel it right to the point of match copying. */\n goto _copy_match;\n }\n\n /* decode literal length */\n if (length == RUN_MASK) {\n size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */\n if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */\n }\n\n#if LZ4_FAST_DEC_LOOP\n safe_literal_copy:\n#endif\n /* copy literals */\n cpy = op+length;\n\n LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);\n if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {\n /* We've either hit the input parsing restriction or the output parsing restriction.\n * In the normal scenario, decoding a full block, it must be the last sequence,\n * otherwise it's an error (invalid input or dimensions).\n * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.\n */\n if (partialDecoding) {\n /* Since we are 
partial decoding we may be in this block because of the output parsing\n * restriction, which is not valid since the output buffer is allowed to be undersized.\n */\n DEBUGLOG(7, \"partialDecoding: copying literals, close to input or output end\")\n DEBUGLOG(7, \"partialDecoding: literal length = %u\", (unsigned)length);\n DEBUGLOG(7, \"partialDecoding: remaining space in dstBuffer : %i\", (int)(oend - op));\n DEBUGLOG(7, \"partialDecoding: remaining space in srcBuffer : %i\", (int)(iend - ip));\n /* Finishing in the middle of a literals segment,\n * due to lack of input.\n */\n if (ip+length > iend) {\n length = (size_t)(iend-ip);\n cpy = op + length;\n }\n /* Finishing in the middle of a literals segment,\n * due to lack of output space.\n */\n if (cpy > oend) {\n cpy = oend;\n assert(op<=oend);\n length = (size_t)(oend-op);\n }\n } else {\n /* We must be on the last sequence (or invalid) because of the parsing limitations\n * so check that we exactly consume the input and don't overrun the output buffer.\n */\n if ((ip+length != iend) || (cpy > oend)) {\n DEBUGLOG(5, \"should have been last run of literals\")\n DEBUGLOG(5, \"ip(%p) + length(%i) = %p != iend (%p)\", ip, (int)length, ip+length, iend);\n DEBUGLOG(5, \"or cpy(%p) > (oend-MFLIMIT)(%p)\", cpy, oend-MFLIMIT);\n DEBUGLOG(5, \"after writing %u bytes / %i bytes available\", (unsigned)(op-(BYTE*)dst), outputSize);\n goto _output_error;\n }\n }\n LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */\n ip += length;\n op += length;\n /* Necessarily EOF when !partialDecoding.\n * When partialDecoding, it is EOF if we've either\n * filled the output buffer or\n * can't proceed with reading an offset for following match.\n */\n if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {\n break;\n }\n } else {\n LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */\n ip += length; op = cpy;\n }\n\n /* get offset */\n offset = 
LZ4_readLE16(ip); ip+=2;\n match = op - offset;\n\n /* get matchlength */\n length = token & ML_MASK;\n DEBUGLOG(7, \"blockPos%6u: matchLength token = %u\", (unsigned)(op-(BYTE*)dst), (unsigned)length);\n\n _copy_match:\n if (length == ML_MASK) {\n size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);\n if (addl == rvl_error) { goto _output_error; }\n length += addl;\n if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */\n }\n length += MINMATCH;\n\n#if LZ4_FAST_DEC_LOOP\n safe_match_copy:\n#endif\n if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */\n /* match starting within external dictionary */\n if ((dict==usingExtDict) && (match < lowPrefix)) {\n assert(dictEnd != NULL);\n if (unlikely(op+length > oend-LASTLITERALS)) {\n if (partialDecoding) length = MIN(length, (size_t)(oend-op));\n else goto _output_error; /* doesn't respect parsing restriction */\n }\n\n if (length <= (size_t)(lowPrefix-match)) {\n /* match fits entirely within external dictionary : just copy */\n LZ4_memmove(op, dictEnd - (lowPrefix-match), length);\n op += length;\n } else {\n /* match stretches into both external dictionary and current block */\n size_t const copySize = (size_t)(lowPrefix - match);\n size_t const restSize = length - copySize;\n LZ4_memcpy(op, dictEnd - copySize, copySize);\n op += copySize;\n if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */\n BYTE* const endOfMatch = op + restSize;\n const BYTE* copyFrom = lowPrefix;\n while (op < endOfMatch) *op++ = *copyFrom++;\n } else {\n LZ4_memcpy(op, lowPrefix, restSize);\n op += restSize;\n } }\n continue;\n }\n assert(match >= lowPrefix);\n\n /* copy match within block */\n cpy = op + length;\n\n /* partialDecoding : may end anywhere within the block */\n assert(op<=oend);\n if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n size_t const mlen = MIN(length, 
(size_t)(oend-op));\n const BYTE* const matchEnd = match + mlen;\n BYTE* const copyEnd = op + mlen;\n if (matchEnd > op) { /* overlap copy */\n while (op < copyEnd) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, mlen);\n }\n op = copyEnd;\n if (op == oend) { break; }\n continue;\n }\n\n if (unlikely(offset<8)) {\n LZ4_write32(op, 0); /* silence msan warning when offset==0 */\n op[0] = match[0];\n op[1] = match[1];\n op[2] = match[2];\n op[3] = match[3];\n match += inc32table[offset];\n LZ4_memcpy(op+4, match, 4);\n match -= dec64table[offset];\n } else {\n LZ4_memcpy(op, match, 8);\n match += 8;\n }\n op += 8;\n\n if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {\n BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);\n if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */\n if (op < oCopyLimit) {\n LZ4_wildCopy8(op, match, oCopyLimit);\n match += oCopyLimit - op;\n op = oCopyLimit;\n }\n while (op < cpy) { *op++ = *match++; }\n } else {\n LZ4_memcpy(op, match, 8);\n if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }\n }\n op = cpy; /* wildcopy correction */\n }\n\n /* end of decoding */\n DEBUGLOG(5, \"decoded %i bytes\", (int) (((char*)op)-dst));\n return (int) (((char*)op)-dst); /* Nb of output bytes decoded */\n\n /* Overflow error detected */\n _output_error:\n return (int) (-(((const char*)ip)-src))-1;\n }\n}\n\n\n/*===== Instantiate the API decoding functions. 
=====*/\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,\n decode_full_block, noDict,\n (BYTE*)dest, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,\n partial_decode,\n noDict, (BYTE*)dst, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_fast(const char* source, char* dest, int originalSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_fast\");\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, NULL, 0);\n}\n\n/*===== Instantiate a few more decoding cases, used more than once. =====*/\n\nLZ4_FORCE_O2 /* Exported, an obsolete API function. */\nint LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, withPrefix64k,\n (BYTE*)dest - 64 KB, NULL, 0);\n}\n\n/* Another obsolete API function, paired with the previous one. 
*/\nint LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 64 KB, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,\n size_t prefixSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, noDict,\n (BYTE*)dest-prefixSize, NULL, 0);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_forceExtDict(const char* source, char* dest,\n int compressedSize, int maxOutputSize,\n const void* dictStart, size_t dictSize)\n{\n DEBUGLOG(5, \"LZ4_decompress_safe_forceExtDict\");\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nint LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,\n int compressedSize, int targetOutputSize, int dstCapacity,\n const void* dictStart, size_t dictSize)\n{\n dstCapacity = MIN(targetOutputSize, dstCapacity);\n return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,\n partial_decode, usingExtDict,\n (BYTE*)dest, (const BYTE*)dictStart, dictSize);\n}\n\nLZ4_FORCE_O2\nstatic int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,\n const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n 0, (const BYTE*)dictStart, dictSize);\n}\n\n/* The \"double 
dictionary\" mode, for use with e.g. ring buffers: the first part\n * of the dictionary is passed as prefix, and the second via dictStart + dictSize.\n * These routines are used only once, in LZ4_decompress_*_continue().\n */\nLZ4_FORCE_INLINE\nint LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,\n size_t prefixSize, const void* dictStart, size_t dictSize)\n{\n return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,\n decode_full_block, usingExtDict,\n (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);\n}\n\n/*===== streaming decompression functions =====*/\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nLZ4_streamDecode_t* LZ4_createStreamDecode(void)\n{\n LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));\n return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));\n}\n\nint LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)\n{\n if (LZ4_stream == NULL) { return 0; } /* support free on NULL */\n FREEMEM(LZ4_stream);\n return 0;\n}\n#endif\n\n/*! LZ4_setStreamDecode() :\n * Use this function to instruct where to find the dictionary.\n * This function is not necessary if previous data is still available where it was decoded.\n * Loading a size of 0 is allowed (same effect as no dictionary).\n * @return : 1 if OK, 0 if error\n */\nint LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n lz4sd->prefixSize = (size_t)dictSize;\n if (dictSize) {\n assert(dictionary != NULL);\n lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;\n } else {\n lz4sd->prefixEnd = (const BYTE*) dictionary;\n }\n lz4sd->externalDict = NULL;\n lz4sd->extDictSize = 0;\n return 1;\n}\n\n/*! 
LZ4_decoderRingBufferSize() :\n * when setting a ring buffer for streaming decompression (optional scenario),\n * provides the minimum size of this ring buffer\n * to be compatible with any source respecting maxBlockSize condition.\n * Note : in a ring buffer scenario,\n * blocks are presumed decompressed next to each other.\n * When not enough space remains for next block (remainingSize < maxBlockSize),\n * decoding resumes from beginning of ring buffer.\n * @return : minimum ring buffer size,\n * or 0 if there is an error (invalid maxBlockSize).\n */\nint LZ4_decoderRingBufferSize(int maxBlockSize)\n{\n if (maxBlockSize < 0) return 0;\n if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;\n if (maxBlockSize < 16) maxBlockSize = 16;\n return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);\n}\n\n/*\n*_continue() :\n These decoding functions allow decompression of multiple blocks in \"streaming\" mode.\n Previously decoded blocks must still be available at the memory position where they were decoded.\n If it's not possible, save the relevant part of decoded data into a safe buffer,\n and indicate where it stands using LZ4_setStreamDecode()\n*/\nLZ4_FORCE_O2\nint LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)\n{\n LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;\n int result;\n\n if (lz4sd->prefixSize == 0) {\n /* The first call, no dictionary yet. */\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n /* They're rolling the current segment. 
*/\n if (lz4sd->prefixSize >= 64 KB - 1)\n result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n else if (lz4sd->extDictSize == 0)\n result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize);\n else\n result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize += (size_t)result;\n lz4sd->prefixEnd += result;\n } else {\n /* The buffer wraps around, or they're switching to another buffer. */\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)result;\n lz4sd->prefixEnd = (BYTE*)dest + result;\n }\n\n return result;\n}\n\nLZ4_FORCE_O2 int\nLZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,\n const char* source, char* dest, int originalSize)\n{\n LZ4_streamDecode_t_internal* const lz4sd =\n (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);\n int result;\n\n DEBUGLOG(5, \"LZ4_decompress_fast_continue (toDecodeSize=%i)\", originalSize);\n assert(originalSize >= 0);\n\n if (lz4sd->prefixSize == 0) {\n DEBUGLOG(5, \"first invocation : no prefix nor extDict\");\n assert(lz4sd->extDictSize == 0);\n result = LZ4_decompress_fast(source, dest, originalSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n } else if (lz4sd->prefixEnd == (BYTE*)dest) {\n DEBUGLOG(5, \"continue using existing prefix\");\n result = LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n lz4sd->prefixSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return 
result;\n lz4sd->prefixSize += (size_t)originalSize;\n lz4sd->prefixEnd += originalSize;\n } else {\n DEBUGLOG(5, \"prefix becomes extDict\");\n lz4sd->extDictSize = lz4sd->prefixSize;\n lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;\n result = LZ4_decompress_fast_extDict(source, dest, originalSize,\n lz4sd->externalDict, lz4sd->extDictSize);\n if (result <= 0) return result;\n lz4sd->prefixSize = (size_t)originalSize;\n lz4sd->prefixEnd = (BYTE*)dest + originalSize;\n }\n\n return result;\n}\n\n\n/*\nAdvanced decoding functions :\n*_usingDict() :\n These decoding functions work the same as \"_continue\" ones,\n the dictionary must be explicitly provided within parameters\n*/\n\nint LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)\n{\n if (dictSize==0)\n return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);\n if (dictStart+dictSize == dest) {\n if (dictSize >= 64 KB - 1) {\n return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, 
dstCapacity, (size_t)dictSize);\n }\n assert(dictSize >= 0);\n return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);\n}\n\nint LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)\n{\n if (dictSize==0 || dictStart+dictSize == dest)\n return LZ4_decompress_unsafe_generic(\n (const BYTE*)source, (BYTE*)dest, originalSize,\n (size_t)dictSize, NULL, 0);\n assert(dictSize >= 0);\n return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);\n}\n\n\n/*=*************************************************\n* Obsolete Functions\n***************************************************/\n/* obsolete compression functions */\nint LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)\n{\n return LZ4_compress_default(source, dest, inputSize, maxOutputSize);\n}\nint LZ4_compress(const char* src, char* dest, int srcSize)\n{\n return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));\n}\nint LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);\n}\nint LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)\n{\n return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);\n}\nint LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)\n{\n return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);\n}\nint LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)\n{\n return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);\n}\n\n/*\nThese decompression functions are deprecated and should no longer be 
used.\nThey are only provided here for compatibility with older user programs.\n- LZ4_uncompress is totally equivalent to LZ4_decompress_fast\n- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe\n*/\nint LZ4_uncompress (const char* source, char* dest, int outputSize)\n{\n return LZ4_decompress_fast(source, dest, outputSize);\n}\nint LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)\n{\n return LZ4_decompress_safe(source, dest, isize, maxOutputSize);\n}\n\n/* Obsolete Streaming functions */\n\nint LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }\n\nint LZ4_resetStreamState(void* state, char* inputBuffer)\n{\n (void)inputBuffer;\n LZ4_resetStream((LZ4_stream_t*)state);\n return 0;\n}\n\n#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)\nvoid* LZ4_create (char* inputBuffer)\n{\n (void)inputBuffer;\n return LZ4_createStream();\n}\n#endif\n\nchar* LZ4_slideInputBuffer (void* state)\n{\n /* avoid const char * -> char * conversion warning */\n return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;\n}\n\n#endif /* LZ4_COMMONDEFS_ONLY */\n", "middle_code": "int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n LZ4_renormDictT(streamPtr, srcSize);\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n return result;\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "c", "sub_task_type": null}, "context_code": 
[["/pogocache/src/hashmap.c", "// Copyright 2020 Joshua J Baker. All rights reserved.\n// Use of this source code is governed by an MIT-style\n// license that can be found in the LICENSE file.\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\n#define GROW_AT 0.60 /* 60% */\n#define SHRINK_AT 0.10 /* 10% */\n\n#ifndef HASHMAP_LOAD_FACTOR\n#define HASHMAP_LOAD_FACTOR GROW_AT\n#endif\n\nstatic void *(*__malloc)(size_t) = NULL;\nstatic void *(*__realloc)(void *, size_t) = NULL;\nstatic void (*__free)(void *) = NULL;\n\n// hashmap_set_allocator allows for configuring a custom allocator for\n// all hashmap library operations. This function, if needed, should be called\n// only once at startup and a prior to calling hashmap_new().\nvoid hashmap_set_allocator(void *(*malloc)(size_t), void (*free)(void*)) {\n __malloc = malloc;\n __free = free;\n}\n\nstruct bucket {\n uint64_t hash:48;\n uint64_t dib:16;\n};\n\n// hashmap is an open addressed hash map using robinhood hashing.\nstruct hashmap {\n void *(*malloc)(size_t);\n void *(*realloc)(void *, size_t);\n void (*free)(void *);\n size_t elsize;\n size_t cap;\n uint64_t seed0;\n uint64_t seed1;\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1);\n int (*compare)(const void *a, const void *b, void *udata);\n void (*elfree)(void *item);\n void *udata;\n size_t bucketsz;\n size_t nbuckets;\n size_t count;\n size_t mask;\n size_t growat;\n size_t shrinkat;\n uint8_t loadfactor;\n uint8_t growpower;\n bool oom;\n void *buckets;\n void *spare;\n void *edata;\n};\n\nvoid hashmap_set_grow_by_power(struct hashmap *map, size_t power) {\n map->growpower = power < 1 ? 1 : power > 16 ? 16 : power;\n}\n\nstatic double clamp_load_factor(double factor, double default_factor) {\n // Check for NaN and clamp between 50% and 90%\n return factor != factor ? default_factor : \n factor < 0.50 ? 0.50 : \n factor > 0.95 ? 
0.95 : \n factor;\n}\n\nvoid hashmap_set_load_factor(struct hashmap *map, double factor) {\n factor = clamp_load_factor(factor, map->loadfactor / 100.0);\n map->loadfactor = factor * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n}\n\nstatic struct bucket *bucket_at0(void *buckets, size_t bucketsz, size_t i) {\n return (struct bucket*)(((char*)buckets)+(bucketsz*i));\n}\n\nstatic struct bucket *bucket_at(struct hashmap *map, size_t index) {\n return bucket_at0(map->buckets, map->bucketsz, index);\n}\n\nstatic void *bucket_item(struct bucket *entry) {\n return ((char*)entry)+sizeof(struct bucket);\n}\n\nstatic uint64_t clip_hash(uint64_t hash) {\n return hash & 0xFFFFFFFFFFFF;\n}\n\nstatic uint64_t get_hash(struct hashmap *map, const void *key) {\n return clip_hash(map->hash(key, map->seed0, map->seed1));\n}\n\n\n// hashmap_new_with_allocator returns a new hash map using a custom allocator.\n// See hashmap_new for more information information\nstruct hashmap *hashmap_new_with_allocator(void *(*_malloc)(size_t), \n void *(*_realloc)(void*, size_t), void (*_free)(void*),\n size_t elsize, size_t cap, uint64_t seed0, uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n _malloc = _malloc ? _malloc : __malloc ? __malloc : malloc;\n _realloc = _realloc ? _realloc : __realloc ? __realloc : realloc;\n _free = _free ? _free : __free ? 
__free : free;\n size_t ncap = 16;\n if (cap < ncap) {\n cap = ncap;\n } else {\n while (ncap < cap) {\n ncap *= 2;\n }\n cap = ncap;\n }\n size_t bucketsz = sizeof(struct bucket) + elsize;\n while (bucketsz & (sizeof(uintptr_t)-1)) {\n bucketsz++;\n }\n // hashmap + spare + edata\n size_t size = sizeof(struct hashmap)+bucketsz*2;\n struct hashmap *map = _malloc(size);\n if (!map) {\n return NULL;\n }\n memset(map, 0, sizeof(struct hashmap));\n map->elsize = elsize;\n map->bucketsz = bucketsz;\n map->seed0 = seed0;\n map->seed1 = seed1;\n map->hash = hash;\n map->compare = compare;\n map->elfree = elfree;\n map->udata = udata;\n map->spare = ((char*)map)+sizeof(struct hashmap);\n map->edata = (char*)map->spare+bucketsz;\n map->cap = cap;\n map->nbuckets = cap;\n map->mask = map->nbuckets-1;\n map->buckets = _malloc(map->bucketsz*map->nbuckets);\n if (!map->buckets) {\n _free(map);\n return NULL;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->growpower = 1;\n map->loadfactor = clamp_load_factor(HASHMAP_LOAD_FACTOR, GROW_AT) * 100;\n map->growat = map->nbuckets * (map->loadfactor / 100.0);\n map->shrinkat = map->nbuckets * SHRINK_AT;\n map->malloc = _malloc;\n map->realloc = _realloc;\n map->free = _free;\n return map; \n}\n\n// hashmap_new returns a new hash map. \n// Param `elsize` is the size of each element in the tree. Every element that\n// is inserted, deleted, or retrieved will be this size.\n// Param `cap` is the default lower capacity of the hashmap. Setting this to\n// zero will default to 16.\n// Params `seed0` and `seed1` are optional seed values that are passed to the \n// following `hash` function. These can be any value you wish but it's often \n// best to use randomly generated values.\n// Param `hash` is a function that generates a hash value for an item. It's\n// important that you provide a good hash function, otherwise it will perform\n// poorly or be vulnerable to Denial-of-service attacks. 
This implementation\n// comes with two helper functions `hashmap_sip()` and `hashmap_murmur()`.\n// Param `compare` is a function that compares items in the tree. See the \n// qsort stdlib function for an example of how this function works.\n// The hashmap must be freed with hashmap_free(). \n// Param `elfree` is a function that frees a specific item. This should be NULL\n// unless you're storing some kind of reference data in the hash.\nstruct hashmap *hashmap_new(size_t elsize, size_t cap, uint64_t seed0, \n uint64_t seed1,\n uint64_t (*hash)(const void *item, uint64_t seed0, uint64_t seed1),\n int (*compare)(const void *a, const void *b, void *udata),\n void (*elfree)(void *item),\n void *udata)\n{\n return hashmap_new_with_allocator(NULL, NULL, NULL, elsize, cap, seed0, \n seed1, hash, compare, elfree, udata);\n}\n\nstatic void free_elements(struct hashmap *map) {\n if (map->elfree) {\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib) map->elfree(bucket_item(bucket));\n }\n }\n}\n\n// hashmap_clear quickly clears the map. \n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\n// When the update_cap is provided, the map's capacity will be updated to match\n// the currently number of allocated buckets. 
This is an optimization to ensure\n// that this operation does not perform any allocations.\nvoid hashmap_clear(struct hashmap *map, bool update_cap) {\n map->count = 0;\n free_elements(map);\n if (update_cap) {\n map->cap = map->nbuckets;\n } else if (map->nbuckets != map->cap) {\n void *new_buckets = map->malloc(map->bucketsz*map->cap);\n if (new_buckets) {\n map->free(map->buckets);\n map->buckets = new_buckets;\n }\n map->nbuckets = map->cap;\n }\n memset(map->buckets, 0, map->bucketsz*map->nbuckets);\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * (map->loadfactor / 100.0) ;\n map->shrinkat = map->nbuckets * SHRINK_AT;\n}\n\nstatic bool resize0(struct hashmap *map, size_t new_cap) {\n struct hashmap *map2 = hashmap_new_with_allocator(map->malloc, map->realloc, \n map->free, map->elsize, new_cap, map->seed0, map->seed1, map->hash, \n map->compare, map->elfree, map->udata);\n if (!map2) return false;\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *entry = bucket_at(map, i);\n if (!entry->dib) {\n continue;\n }\n entry->dib = 1;\n size_t j = entry->hash & map2->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map2, j);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n break;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map2->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map2->spare, map->bucketsz);\n }\n j = (j + 1) & map2->mask;\n entry->dib += 1;\n }\n }\n map->free(map->buckets);\n map->buckets = map2->buckets;\n map->nbuckets = map2->nbuckets;\n map->mask = map2->mask;\n map->growat = map2->growat;\n map->shrinkat = map2->shrinkat;\n map->free(map2);\n return true;\n}\n\nstatic bool resize(struct hashmap *map, size_t new_cap) {\n return resize0(map, new_cap);\n}\n\n// hashmap_set_with_hash works like hashmap_set but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_set_with_hash(struct hashmap *map, const void *item,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*(1<growpower))) {\n map->oom = true;\n return NULL;\n }\n }\n\n struct bucket *entry = map->edata;\n entry->hash = hash;\n entry->dib = 1;\n void *eitem = bucket_item(entry);\n memcpy(eitem, item, map->elsize);\n\n void *bitem;\n size_t i = entry->hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib == 0) {\n memcpy(bucket, entry, map->bucketsz);\n map->count++;\n return NULL;\n }\n bitem = bucket_item(bucket);\n if (entry->hash == bucket->hash && (!map->compare ||\n map->compare(eitem, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n memcpy(bitem, eitem, map->elsize);\n return map->spare;\n }\n if (bucket->dib < entry->dib) {\n memcpy(map->spare, bucket, map->bucketsz);\n memcpy(bucket, entry, map->bucketsz);\n memcpy(entry, map->spare, map->bucketsz);\n eitem = bucket_item(entry);\n }\n i = (i + 1) & map->mask;\n entry->dib += 1;\n }\n}\n\n// hashmap_set inserts or replaces an item in the hash map. If an item is\n// replaced then it is returned otherwise NULL is returned. This operation\n// may allocate memory. If the system is unable to allocate additional\n// memory then NULL is returned and hashmap_oom() returns true.\nconst void *hashmap_set(struct hashmap *map, const void *item) {\n return hashmap_set_with_hash(map, item, get_hash(map, item));\n}\n\n// hashmap_get_with_hash works like hashmap_get but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_get_with_hash(struct hashmap *map, const void *key, \n uint64_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) return NULL;\n if (bucket->hash == hash) {\n void *bitem = bucket_item(bucket);\n if (!map->compare || map->compare(key, bitem, map->udata) == 0) {\n return bitem;\n }\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_get returns the item based on the provided key. If the item is not\n// found then NULL is returned.\nconst void *hashmap_get(struct hashmap *map, const void *key) {\n return hashmap_get_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_probe returns the item in the bucket at position or NULL if an item\n// is not set for that bucket. The position is 'moduloed' by the number of \n// buckets in the hashmap.\nconst void *hashmap_probe(struct hashmap *map, uint64_t position) {\n size_t i = position & map->mask;\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n return bucket_item(bucket);\n}\n\n// hashmap_delete_with_hash works like hashmap_delete but you provide your\n// own hash. 
The 'hash' callback provided to the hashmap_new function\n// will not be called\nconst void *hashmap_delete_with_hash(struct hashmap *map, const void *key,\n uint64_t hash)\n{\n hash = clip_hash(hash);\n map->oom = false;\n size_t i = hash & map->mask;\n while(1) {\n struct bucket *bucket = bucket_at(map, i);\n if (!bucket->dib) {\n return NULL;\n }\n void *bitem = bucket_item(bucket);\n if (bucket->hash == hash && (!map->compare ||\n map->compare(key, bitem, map->udata) == 0))\n {\n memcpy(map->spare, bitem, map->elsize);\n bucket->dib = 0;\n while(1) {\n struct bucket *prev = bucket;\n i = (i + 1) & map->mask;\n bucket = bucket_at(map, i);\n if (bucket->dib <= 1) {\n prev->dib = 0;\n break;\n }\n memcpy(prev, bucket, map->bucketsz);\n prev->dib--;\n }\n map->count--;\n if (map->nbuckets > map->cap && map->count <= map->shrinkat) {\n // Ignore the return value. It's ok for the resize operation to\n // fail to allocate enough memory because a shrink operation\n // does not change the integrity of the data.\n resize(map, map->nbuckets/2);\n }\n return map->spare;\n }\n i = (i + 1) & map->mask;\n }\n}\n\n// hashmap_delete removes an item from the hash map and returns it. 
If the\n// item is not found then NULL is returned.\nconst void *hashmap_delete(struct hashmap *map, const void *key) {\n return hashmap_delete_with_hash(map, key, get_hash(map, key));\n}\n\n// hashmap_count returns the number of items in the hash map.\nsize_t hashmap_count(struct hashmap *map) {\n return map->count;\n}\n\n// hashmap_free frees the hash map\n// Every item is called with the element-freeing function given in hashmap_new,\n// if present, to free any data referenced in the elements of the hashmap.\nvoid hashmap_free(struct hashmap *map) {\n if (!map) return;\n free_elements(map);\n map->free(map->buckets);\n map->free(map);\n}\n\n// hashmap_oom returns true if the last hashmap_set() call failed due to the \n// system being out of memory.\nbool hashmap_oom(struct hashmap *map) {\n return map->oom;\n}\n\n// hashmap_scan iterates over all items in the hash map\n// Param `iter` can return false to stop iteration early.\n// Returns false if the iteration has been stopped early.\nbool hashmap_scan(struct hashmap *map, \n bool (*iter)(const void *item, void *udata), void *udata)\n{\n for (size_t i = 0; i < map->nbuckets; i++) {\n struct bucket *bucket = bucket_at(map, i);\n if (bucket->dib && !iter(bucket_item(bucket), udata)) {\n return false;\n }\n }\n return true;\n}\n\n// hashmap_iter iterates one key at a time yielding a reference to an\n// entry at each iteration. Useful to write simple loops and avoid writing\n// dedicated callbacks and udata structures, as in hashmap_scan.\n//\n// map is a hash map handle. i is a pointer to a size_t cursor that\n// should be initialized to 0 at the beginning of the loop. item is a void\n// pointer pointer that is populated with the retrieved item. 
Note that this\n// is NOT a copy of the item stored in the hash map and can be directly\n// modified.\n//\n// Note that if hashmap_delete() is called on the hashmap being iterated,\n// the buckets are rearranged and the iterator must be reset to 0, otherwise\n// unexpected results may be returned after deletion.\n//\n// This function has not been tested for thread safety.\n//\n// The function returns true if an item was retrieved; false if the end of the\n// iteration has been reached.\nbool hashmap_iter(struct hashmap *map, size_t *i, void **item) {\n struct bucket *bucket;\n do {\n if (*i >= map->nbuckets) return false;\n bucket = bucket_at(map, *i);\n (*i)++;\n } while (!bucket->dib);\n *item = bucket_item(bucket);\n return true;\n}\n\n\n//-----------------------------------------------------------------------------\n// SipHash reference C implementation\n//\n// Copyright (c) 2012-2016 Jean-Philippe Aumasson\n// \n// Copyright (c) 2012-2014 Daniel J. Bernstein \n//\n// To the extent possible under law, the author(s) have dedicated all copyright\n// and related and neighboring rights to this software to the public domain\n// worldwide. This software is distributed without any warranty.\n//\n// You should have received a copy of the CC0 Public Domain Dedication along\n// with this software. 
If not, see\n// .\n//\n// default: SipHash-2-4\n//-----------------------------------------------------------------------------\nstatic uint64_t SIP64(const uint8_t *in, const size_t inlen, uint64_t seed0,\n uint64_t seed1) \n{\n#define U8TO64_LE(p) \\\n { (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \\\n ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \\\n ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \\\n ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) }\n#define U64TO8_LE(p, v) \\\n { U32TO8_LE((p), (uint32_t)((v))); \\\n U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); }\n#define U32TO8_LE(p, v) \\\n { (p)[0] = (uint8_t)((v)); \\\n (p)[1] = (uint8_t)((v) >> 8); \\\n (p)[2] = (uint8_t)((v) >> 16); \\\n (p)[3] = (uint8_t)((v) >> 24); }\n#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))\n#define SIPROUND \\\n { v0 += v1; v1 = ROTL(v1, 13); \\\n v1 ^= v0; v0 = ROTL(v0, 32); \\\n v2 += v3; v3 = ROTL(v3, 16); \\\n v3 ^= v2; \\\n v0 += v3; v3 = ROTL(v3, 21); \\\n v3 ^= v0; \\\n v2 += v1; v1 = ROTL(v1, 17); \\\n v1 ^= v2; v2 = ROTL(v2, 32); }\n uint64_t k0 = U8TO64_LE((uint8_t*)&seed0);\n uint64_t k1 = U8TO64_LE((uint8_t*)&seed1);\n uint64_t v3 = UINT64_C(0x7465646279746573) ^ k1;\n uint64_t v2 = UINT64_C(0x6c7967656e657261) ^ k0;\n uint64_t v1 = UINT64_C(0x646f72616e646f6d) ^ k1;\n uint64_t v0 = UINT64_C(0x736f6d6570736575) ^ k0;\n const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));\n for (; in != end; in += 8) {\n uint64_t m = U8TO64_LE(in);\n v3 ^= m;\n SIPROUND; SIPROUND;\n v0 ^= m;\n }\n const int left = inlen & 7;\n uint64_t b = ((uint64_t)inlen) << 56;\n switch (left) {\n case 7: b |= ((uint64_t)in[6]) << 48; /* fall through */\n case 6: b |= ((uint64_t)in[5]) << 40; /* fall through */\n case 5: b |= ((uint64_t)in[4]) << 32; /* fall through */\n case 4: b |= ((uint64_t)in[3]) << 24; /* fall through */\n case 3: b |= ((uint64_t)in[2]) << 16; /* fall through */\n case 2: b |= ((uint64_t)in[1]) << 8; /* fall 
through */\n case 1: b |= ((uint64_t)in[0]); break;\n case 0: break;\n }\n v3 ^= b;\n SIPROUND; SIPROUND;\n v0 ^= b;\n v2 ^= 0xff;\n SIPROUND; SIPROUND; SIPROUND; SIPROUND;\n b = v0 ^ v1 ^ v2 ^ v3;\n uint64_t out = 0;\n U64TO8_LE((uint8_t*)&out, b);\n return out;\n}\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n//\n// Murmur3_86_128\n//-----------------------------------------------------------------------------\nstatic uint64_t MM86128(const void *key, const int len, uint32_t seed) {\n#define\tROTL32(x, r) ((x << r) | (x >> (32 - r)))\n#define FMIX32(h) h^=h>>16; h*=0x85ebca6b; h^=h>>13; h*=0xc2b2ae35; h^=h>>16;\n const uint8_t * data = (const uint8_t*)key;\n const int nblocks = len / 16;\n uint32_t h1 = seed;\n uint32_t h2 = seed;\n uint32_t h3 = seed;\n uint32_t h4 = seed;\n uint32_t c1 = 0x239b961b; \n uint32_t c2 = 0xab0e9789;\n uint32_t c3 = 0x38b34ae5; \n uint32_t c4 = 0xa1e38b93;\n const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n for (int i = -nblocks; i; i++) {\n uint32_t k1 = blocks[i*4+0];\n uint32_t k2 = blocks[i*4+1];\n uint32_t k3 = blocks[i*4+2];\n uint32_t k4 = blocks[i*4+3];\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n }\n const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n uint32_t k1 = 0;\n uint32_t k2 = 0;\n uint32_t k3 = 0;\n uint32_t k4 = 0;\n switch(len & 15) {\n case 15: k4 ^= tail[14] << 16; /* fall through */\n case 14: k4 ^= tail[13] << 8; /* fall through 
*/\n case 13: k4 ^= tail[12] << 0;\n k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n /* fall through */\n case 12: k3 ^= tail[11] << 24; /* fall through */\n case 11: k3 ^= tail[10] << 16; /* fall through */\n case 10: k3 ^= tail[ 9] << 8; /* fall through */\n case 9: k3 ^= tail[ 8] << 0;\n k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n /* fall through */\n case 8: k2 ^= tail[ 7] << 24; /* fall through */\n case 7: k2 ^= tail[ 6] << 16; /* fall through */\n case 6: k2 ^= tail[ 5] << 8; /* fall through */\n case 5: k2 ^= tail[ 4] << 0;\n k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n /* fall through */\n case 4: k1 ^= tail[ 3] << 24; /* fall through */\n case 3: k1 ^= tail[ 2] << 16; /* fall through */\n case 2: k1 ^= tail[ 1] << 8; /* fall through */\n case 1: k1 ^= tail[ 0] << 0;\n k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n /* fall through */\n };\n h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n FMIX32(h1); FMIX32(h2); FMIX32(h3); FMIX32(h4);\n h1 += h2; h1 += h3; h1 += h4;\n h2 += h1; h3 += h1; h4 += h1;\n return (((uint64_t)h2)<<32)|h1;\n}\n\n//-----------------------------------------------------------------------------\n// xxHash Library\n// Copyright (c) 2012-2021 Yann Collet\n// All rights reserved.\n// \n// BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)\n//\n// xxHash3\n//-----------------------------------------------------------------------------\n#define XXH_PRIME_1 11400714785074694791ULL\n#define XXH_PRIME_2 14029467366897019727ULL\n#define XXH_PRIME_3 1609587929392839161ULL\n#define XXH_PRIME_4 9650029242287828579ULL\n#define XXH_PRIME_5 2870177450012600261ULL\n\nstatic uint64_t XXH_read64(const void* memptr) {\n uint64_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint32_t XXH_read32(const void* memptr) {\n uint32_t val;\n memcpy(&val, memptr, sizeof(val));\n return val;\n}\n\nstatic uint64_t XXH_rotl64(uint64_t 
x, int r) {\n return (x << r) | (x >> (64 - r));\n}\n\nstatic uint64_t xxh3(const void* data, size_t len, uint64_t seed) {\n const uint8_t* p = (const uint8_t*)data;\n const uint8_t* const end = p + len;\n uint64_t h64;\n\n if (len >= 32) {\n const uint8_t* const limit = end - 32;\n uint64_t v1 = seed + XXH_PRIME_1 + XXH_PRIME_2;\n uint64_t v2 = seed + XXH_PRIME_2;\n uint64_t v3 = seed + 0;\n uint64_t v4 = seed - XXH_PRIME_1;\n\n do {\n v1 += XXH_read64(p) * XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n\n v2 += XXH_read64(p + 8) * XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n\n v3 += XXH_read64(p + 16) * XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n\n v4 += XXH_read64(p + 24) * XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n\n p += 32;\n } while (p <= limit);\n\n h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + \n XXH_rotl64(v4, 18);\n\n v1 *= XXH_PRIME_2;\n v1 = XXH_rotl64(v1, 31);\n v1 *= XXH_PRIME_1;\n h64 ^= v1;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v2 *= XXH_PRIME_2;\n v2 = XXH_rotl64(v2, 31);\n v2 *= XXH_PRIME_1;\n h64 ^= v2;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v3 *= XXH_PRIME_2;\n v3 = XXH_rotl64(v3, 31);\n v3 *= XXH_PRIME_1;\n h64 ^= v3;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n\n v4 *= XXH_PRIME_2;\n v4 = XXH_rotl64(v4, 31);\n v4 *= XXH_PRIME_1;\n h64 ^= v4;\n h64 = h64 * XXH_PRIME_1 + XXH_PRIME_4;\n }\n else {\n h64 = seed + XXH_PRIME_5;\n }\n\n h64 += (uint64_t)len;\n\n while (p + 8 <= end) {\n uint64_t k1 = XXH_read64(p);\n k1 *= XXH_PRIME_2;\n k1 = XXH_rotl64(k1, 31);\n k1 *= XXH_PRIME_1;\n h64 ^= k1;\n h64 = XXH_rotl64(h64, 27) * XXH_PRIME_1 + XXH_PRIME_4;\n p += 8;\n }\n\n if (p + 4 <= end) {\n h64 ^= (uint64_t)(XXH_read32(p)) * XXH_PRIME_1;\n h64 = XXH_rotl64(h64, 23) * XXH_PRIME_2 + XXH_PRIME_3;\n p += 4;\n }\n\n while (p < end) {\n h64 ^= (*p) * XXH_PRIME_5;\n h64 = XXH_rotl64(h64, 11) * XXH_PRIME_1;\n p++;\n }\n\n h64 ^= h64 >> 33;\n h64 *= 
XXH_PRIME_2;\n h64 ^= h64 >> 29;\n h64 *= XXH_PRIME_3;\n h64 ^= h64 >> 32;\n\n return h64;\n}\n\n// hashmap_sip returns a hash value for `data` using SipHash-2-4.\nuint64_t hashmap_sip(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n return SIP64((uint8_t*)data, len, seed0, seed1);\n}\n\n// hashmap_murmur returns a hash value for `data` using Murmur3_86_128.\nuint64_t hashmap_murmur(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return MM86128(data, len, seed0);\n}\n\nuint64_t hashmap_xxhash3(const void *data, size_t len, uint64_t seed0,\n uint64_t seed1)\n{\n (void)seed1;\n return xxh3(data, len ,seed0);\n}\n\n//==============================================================================\n// TESTS AND BENCHMARKS\n// $ cc -DHASHMAP_TEST hashmap.c && ./a.out # run tests\n// $ cc -DHASHMAP_TEST -O3 hashmap.c && BENCH=1 ./a.out # run benchmarks\n//==============================================================================\n#ifdef HASHMAP_TEST\n\nstatic size_t deepcount(struct hashmap *map) {\n size_t count = 0;\n for (size_t i = 0; i < map->nbuckets; i++) {\n if (bucket_at(map, i)->dib) {\n count++;\n }\n }\n return count;\n}\n\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wpedantic\"\n#endif\n#ifdef __clang__\n#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n#pragma GCC diagnostic ignored \"-Wcompound-token-split-by-macro\"\n#pragma GCC diagnostic ignored \"-Wgnu-statement-expression-from-macro-expansion\"\n#endif\n#ifdef __GNUC__\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \n#include \n#include \n#include \n#include \n#include \"hashmap.h\"\n\nstatic bool rand_alloc_fail = false;\nstatic int rand_alloc_fail_odds = 3; // 1 in 3 chance malloc will fail.\nstatic uintptr_t total_allocs = 0;\nstatic uintptr_t total_mem = 0;\n\nstatic void *xmalloc(size_t size) {\n if (rand_alloc_fail && rand()%rand_alloc_fail_odds == 0) {\n return NULL;\n }\n void *mem = 
malloc(sizeof(uintptr_t)+size);\n assert(mem);\n *(uintptr_t*)mem = size;\n total_allocs++;\n total_mem += size;\n return (char*)mem+sizeof(uintptr_t);\n}\n\nstatic void xfree(void *ptr) {\n if (ptr) {\n total_mem -= *(uintptr_t*)((char*)ptr-sizeof(uintptr_t));\n free((char*)ptr-sizeof(uintptr_t));\n total_allocs--;\n }\n}\n\nstatic void shuffle(void *array, size_t numels, size_t elsize) {\n char tmp[elsize];\n char *arr = array;\n for (size_t i = 0; i < numels - 1; i++) {\n int j = i + rand() / (RAND_MAX / (numels - i) + 1);\n memcpy(tmp, arr + j * elsize, elsize);\n memcpy(arr + j * elsize, arr + i * elsize, elsize);\n memcpy(arr + i * elsize, tmp, elsize);\n }\n}\n\nstatic bool iter_ints(const void *item, void *udata) {\n int *vals = *(int**)udata;\n vals[*(int*)item] = 1;\n return true;\n}\n\nstatic int compare_ints_udata(const void *a, const void *b, void *udata) {\n return *(int*)a - *(int*)b;\n}\n\nstatic int compare_strs(const void *a, const void *b, void *udata) {\n return strcmp(*(char**)a, *(char**)b);\n}\n\nstatic uint64_t hash_int(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(item, sizeof(int), seed0, seed1);\n // return hashmap_sip(item, sizeof(int), seed0, seed1);\n // return hashmap_murmur(item, sizeof(int), seed0, seed1);\n}\n\nstatic uint64_t hash_str(const void *item, uint64_t seed0, uint64_t seed1) {\n return hashmap_xxhash3(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_sip(*(char**)item, strlen(*(char**)item), seed0, seed1);\n // return hashmap_murmur(*(char**)item, strlen(*(char**)item), seed0, seed1);\n}\n\nstatic void free_str(void *item) {\n xfree(*(char**)item);\n}\n\nstatic void all(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):2000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n rand_alloc_fail = true;\n\n // test sip and murmur hashes\n assert(hashmap_sip(\"hello\", 
5, 1, 2) == 2957200328589801622);\n assert(hashmap_murmur(\"hello\", 5, 1, 2) == 1682575153221130884);\n assert(hashmap_xxhash3(\"hello\", 5, 1, 2) == 2584346877953614258);\n\n int *vals;\n while (!(vals = xmalloc(N * sizeof(int)))) {}\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n struct hashmap *map;\n\n while (!(map = hashmap_new(sizeof(int), 0, seed, seed, \n hash_int, compare_ints_udata, NULL, NULL))) {}\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n // // printf(\"== %d ==\\n\", vals[i]);\n assert(map->count == (size_t)i);\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n const int *v;\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n \n for (int j = 0; j < i; j++) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n while (true) {\n v = hashmap_set(map, &vals[i]);\n if (!v) {\n assert(hashmap_oom(map));\n continue;\n } else {\n assert(!hashmap_oom(map));\n assert(v && *v == vals[i]);\n break;\n }\n }\n v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(!hashmap_delete(map, &vals[i]));\n assert(!hashmap_set(map, &vals[i]));\n assert(map->count == (size_t)(i+1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n }\n\n int *vals2;\n while (!(vals2 = xmalloc(N * sizeof(int)))) {}\n memset(vals2, 0, N * sizeof(int));\n assert(hashmap_scan(map, iter_ints, &vals2));\n\n // Test hashmap_iter. 
This does the same as hashmap_scan above.\n size_t iter = 0;\n void *iter_val;\n while (hashmap_iter (map, &iter, &iter_val)) {\n assert (iter_ints(iter_val, &vals2));\n }\n for (int i = 0; i < N; i++) {\n assert(vals2[i] == 1);\n }\n xfree(vals2);\n\n shuffle(vals, N, sizeof(int));\n for (int i = 0; i < N; i++) {\n const int *v;\n v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n assert(!hashmap_get(map, &vals[i]));\n assert(map->count == (size_t)(N-i-1));\n assert(map->count == hashmap_count(map));\n assert(map->count == deepcount(map));\n for (int j = N-1; j > i; j--) {\n v = hashmap_get(map, &vals[j]);\n assert(v && *v == vals[j]);\n }\n }\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n assert(map->count != 0);\n size_t prev_cap = map->cap;\n hashmap_clear(map, true);\n assert(prev_cap < map->cap);\n assert(map->count == 0);\n\n\n for (int i = 0; i < N; i++) {\n while (true) {\n assert(!hashmap_set(map, &vals[i]));\n if (!hashmap_oom(map)) {\n break;\n }\n }\n }\n\n prev_cap = map->cap;\n hashmap_clear(map, false);\n assert(prev_cap == map->cap);\n\n hashmap_free(map);\n\n xfree(vals);\n\n\n while (!(map = hashmap_new(sizeof(char*), 0, seed, seed,\n hash_str, compare_strs, free_str, NULL)));\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_clear(map, false);\n assert(hashmap_count(map) == 0);\n\n for (int i = 0; i < N; i++) {\n char *str;\n while (!(str = xmalloc(16)));\n snprintf(str, 16, \"s%i\", i);\n while(!hashmap_set(map, &str));\n }\n\n hashmap_free(map);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\n#define bench(name, N, code) {{ \\\n if (strlen(name) > 0) { \\\n printf(\"%-14s \", name); \\\n } \\\n size_t tmem = total_mem; \\\n size_t tallocs = 
total_allocs; \\\n uint64_t bytes = 0; \\\n clock_t begin = clock(); \\\n for (int i = 0; i < N; i++) { \\\n (code); \\\n } \\\n clock_t end = clock(); \\\n double elapsed_secs = (double)(end - begin) / CLOCKS_PER_SEC; \\\n double bytes_sec = (double)bytes/elapsed_secs; \\\n printf(\"%d ops in %.3f secs, %.0f ns/op, %.0f op/sec\", \\\n N, elapsed_secs, \\\n elapsed_secs/(double)N*1e9, \\\n (double)N/elapsed_secs \\\n ); \\\n if (bytes > 0) { \\\n printf(\", %.1f GB/sec\", bytes_sec/1024/1024/1024); \\\n } \\\n if (total_mem > tmem) { \\\n size_t used_mem = total_mem-tmem; \\\n printf(\", %.2f bytes/op\", (double)used_mem/N); \\\n } \\\n if (total_allocs > tallocs) { \\\n size_t used_allocs = total_allocs-tallocs; \\\n printf(\", %.2f allocs/op\", (double)used_allocs/N); \\\n } \\\n printf(\"\\n\"); \\\n}}\n\nstatic void benchmarks(void) {\n int seed = getenv(\"SEED\")?atoi(getenv(\"SEED\")):time(NULL);\n int N = getenv(\"N\")?atoi(getenv(\"N\")):5000000;\n printf(\"seed=%d, count=%d, item_size=%zu\\n\", seed, N, sizeof(int));\n srand(seed);\n\n\n int *vals = xmalloc(N * sizeof(int));\n for (int i = 0; i < N; i++) {\n vals[i] = i;\n }\n\n shuffle(vals, N, sizeof(int));\n\n struct hashmap *map;\n shuffle(vals, N, sizeof(int));\n\n map = hashmap_new(sizeof(int), 0, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get\", N, {\n const int *v = hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete\", N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n hashmap_free(map);\n\n map = hashmap_new(sizeof(int), N, seed, seed, hash_int, compare_ints_udata, \n NULL, NULL);\n bench(\"set (cap)\", N, {\n const int *v = hashmap_set(map, &vals[i]);\n assert(!v);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"get (cap)\", N, {\n const int *v = 
hashmap_get(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n shuffle(vals, N, sizeof(int));\n bench(\"delete (cap)\" , N, {\n const int *v = hashmap_delete(map, &vals[i]);\n assert(v && *v == vals[i]);\n })\n\n hashmap_free(map);\n\n \n xfree(vals);\n\n if (total_allocs != 0) {\n fprintf(stderr, \"total_allocs: expected 0, got %lu\\n\", total_allocs);\n exit(1);\n }\n}\n\nint main(void) {\n hashmap_set_allocator(xmalloc, xfree);\n\n if (getenv(\"BENCH\")) {\n printf(\"Running hashmap.c benchmarks...\\n\");\n benchmarks();\n } else {\n printf(\"Running hashmap.c tests...\\n\");\n all();\n printf(\"PASSED\\n\");\n }\n}\n\n\n#endif\n\n\n"], ["/pogocache/src/pogocache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit pogocache.c is the primary caching engine library, which is designed\n// to be standalone and embeddable.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"pogocache.h\"\n\n#define MINLOADFACTOR_RH 55 // 55%\n#define MAXLOADFACTOR_RH 95 // 95%\n#define DEFLOADFACTOR 75 // 75%\n#define SHRINKAT 10 // 10%\n#define DEFSHARDS 4096 // default number of shards\n#define INITCAP 64 // intial number of buckets per shard\n\n// #define DBGCHECKENTRY\n// #define EVICTONITER\n// #define HALFSECONDTIME\n// #define NO48BITPTRS\n\n#if INTPTR_MAX == INT64_MAX\n#ifdef NO48BITPTRS\n#define PTRSIZE 8\n#else\n#define PTRSIZE 6\n#endif\n#elif INTPTR_MAX == INT32_MAX\n#define PTRSIZE 4\n#else\n#error Unknown pointer size\n#endif\n\nstatic struct pogocache_count_opts defcountopts = { 0 };\nstatic struct pogocache_total_opts deftotalopts = { 0 };\nstatic struct 
pogocache_size_opts defsizeopts = { 0 };\nstatic struct pogocache_sweep_opts defsweepopts = { 0 };\nstatic struct pogocache_clear_opts defclearopts = { 0 };\nstatic struct pogocache_store_opts defstoreopts = { 0 };\nstatic struct pogocache_load_opts defloadopts = { 0 };\nstatic struct pogocache_delete_opts defdeleteopts = { 0 };\nstatic struct pogocache_iter_opts defiteropts = { 0 };\nstatic struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// returns monotonic nanoseconds of the CPU clock.\nstatic int64_t gettime(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// returns offset of system clock since first call in thread.\nstatic int64_t getnow(void) {\n return gettime();\n}\n\n// https://github.com/tidwall/th64\nstatic uint64_t th64(const void *data, size_t len, uint64_t seed) {\n uint8_t*p=(uint8_t*)data,*e=p+len;\n uint64_t r=0x14020a57acced8b7,x,h=seed;\n while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;\n while(p>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);\n}\n\n// Load a pointer from an unaligned memory.\nstatic void *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\n// Store a pointer into unaligned memory.\nstatic void 
store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nstatic uint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\n// Sixpack compression algorithm\n// - Converts a simple 8-bit string into 6-bit string.\n// - Intended to be used on small strings that only use characters commonly\n// used for keys in KV data stores.\n// - Allows the following 64 item character set:\n// -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy\n// Note that the characters \"QZz\" are not included.\n// - Sortable and comparable using memcmp.\nstatic char tosix[256] = {\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0-15\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, // 32-47\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, // 48-63\n 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79\n 29, 0, 30, 31, 32, 33, 34, 35, 36, 37, 0, 0, 0, 0, 0, 38, // 80-95\n 0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111\n 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 112-127\n};\n\nstatic char fromsix[] = {\n 0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\n 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',\n 'd', 'e', 'f', 'g', 'h', 'i', 
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'\n};\n\n// 0: [000000..] bitpos: 0\n// 1: [00000011][1111....] bitpos: 6\n// 2: [00000011][11112222][22......] bitpos: 12 \n// 3: [00000011][11112222][22333333] bitpos: 18\n\n// Sixpack data\n// Fills the data in dst and returns the number of bytes filled.\n// Returns 0 if not a sixpackable.\n// The dst array must be large enough to hold packed value\nstatic int sixpack(const char *data, int len, char dst[]){\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n for (int i = 0; i < len; i++) {\n int k6v = tosix[bytes[i]];\n if (k6v == 0) {\n return 0;\n }\n if (i%4 == 0) {\n dst[j++] = k6v<<2;\n } else if (i%4 == 1) {\n dst[j-1] |= k6v>>4;\n dst[j++] = k6v<<4;\n } else if (i%4 == 2) {\n dst[j-1] |= k6v>>2;\n dst[j++] = k6v<<6;\n } else {\n dst[j-1] |= k6v;\n }\n }\n return j;\n}\n\n// (Un)sixpack data.\n// Fills the data in dst and returns the len of original data.\n// The data must be sixpacked and len must be > 0.\n// The dst array must be large enough to hold unpacked value\nstatic int unsixpack(const char *data, int len, char dst[]) {\n const unsigned char *bytes = (unsigned char*)data;\n int j = 0;\n int k = 0;\n for (int i = 0; i < len; i++) {\n if (k == 0) {\n dst[j++] = fromsix[bytes[i]>>2];\n k++;\n } else if (k == 1) {\n dst[j++] = fromsix[((bytes[i-1]<<4)|(bytes[i]>>4))&63];\n k++;\n } else {\n dst[j++] = fromsix[((bytes[i-1]<<2)|(bytes[i]>>6))&63];\n dst[j++] = fromsix[bytes[i]&63];\n k = 0;\n }\n }\n if (j > 0 && dst[j-1] == 0) {\n j--;\n }\n return j;\n}\n\n// Safely adds two int64_t values, clamping on overflow.\nstatic int64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n/// https://github.com/tidwall/varint.c\nstatic int varint_write_u64(void *data, uint64_t 
x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nstatic int varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\n#ifdef HALFSECONDTIME\ntypedef uint32_t etime_t;\n#else\ntypedef int64_t etime_t;\n#endif\n\n\n// Mostly a copy of the pogocache_opts, but used internally\n// See the opts_to_ctx function for translation.\nstruct pgctx {\n void *(*malloc)(size_t);\n void (*free)(void*);\n size_t (*malloc_size)(void*);\n void (*yield)(void *udata);\n void (*evicted)(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata);\n void *udata;\n bool usecas;\n bool nosixpack;\n bool noevict;\n bool allowshrink;\n bool usethreadbatch;\n int nshards;\n double loadfactor;\n double shrinkfactor;\n uint64_t seed;\n};\n\n// The entry structure is a simple allocation with all the fields, being \n// variable in size, slammed together contiguously. There's a one byte header\n// that provides information about what is available in the structure.\n// The format is: (header,time,expires?,flags?,cas?,key,value)\n// The expires, flags, and cas fields are optional. 
The optionality depends on\n// header bit flags.\nstruct entry;\n\n// Returns the sizeof the entry struct, which takes up no space at all.\n// This would be like doing a sizeof(struct entry), if entry had a structure.\nstatic size_t entry_struct_size(void) {\n return 0;\n}\n\n// Returns the data portion of the entry, which is the entire allocation.\nstatic const uint8_t *entry_data(const struct entry *entry) {\n return (uint8_t*)entry;\n}\n\nstatic int64_t entry_expires(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n int64_t x = 0;\n if ((hdr>>0)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\nstatic int64_t entry_time(struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n etime_t etime;\n memcpy(&etime, p+1, sizeof(etime_t));\n#ifdef HALFSECONDTIME\n int64_t time = (int64_t)etime * INT64_C(500000000);\n#else \n int64_t time = etime;\n#endif \n return time;\n}\n\nstatic void entry_settime(struct entry *entry, int64_t time) {\n const uint8_t *p = entry_data(entry);\n#ifdef HALFSECONDTIME\n // Eviction time is stored as half seconds.\n etime_t etime = time / INT64_C(500000000);\n etime = etime > UINT32_MAX ? UINT32_MAX : etime;\n#else\n etime_t etime = time;\n#endif\n memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));\n}\n\nstatic int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,\n int64_t cleartime)\n{\n return etime < cleartime ? POGOCACHE_REASON_CLEARED :\n expires > 0 && expires <= now ? 
POGOCACHE_REASON_EXPIRED :\n 0;\n}\n\nstatic int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {\n int64_t etime = entry_time(entry);\n int64_t expires = entry_expires(entry);\n return entry_alive_exp(expires, etime, now, cleartime);\n}\n\nstatic uint64_t entry_cas(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n uint64_t x = 0;\n if ((hdr>>2)&1) {\n memcpy(&x, p, 8);\n }\n return x;\n}\n\n// returns the key. If using sixpack make sure to copy the result asap.\nstatic const char *entry_key(const struct entry *entry, size_t *keylen_out,\n char buf[128])\n{\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n if ((hdr>>3)&1) {\n keylen = unsixpack(key, (int)keylen, buf);\n key = buf;\n }\n *keylen_out = keylen;\n return key;\n}\n\n// returns the raw key. 
sixpack will be returned in it's raw format\nstatic const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {\n const uint8_t *p = entry_data(entry);\n const uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p += 4; // flags\n }\n if ((hdr>>2)&1) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n size_t keylen = x;\n char *key = (char*)p;\n *keylen_out = keylen;\n return key;\n}\n\nstatic bool entry_sixpacked(const struct entry *entry) {\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p);\n return (hdr>>3)&1;\n}\n\nstatic size_t entry_extract(const struct entry *entry, const char **key,\n size_t *keylen, char buf[128], const char **val, size_t *vallen, \n int64_t *expires, uint32_t *flags, uint64_t *cas,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n if (expires) {\n memcpy(expires, p, 8);\n }\n p += 8; // expires\n } else {\n if (expires) {\n *expires = 0;\n }\n }\n if ((hdr>>1)&1) {\n if (flags) {\n memcpy(flags, p, 4);\n }\n p += 4; // flags\n } else {\n if (flags) {\n *flags = 0;\n }\n }\n if (ctx->usecas) {\n if (cas) {\n memcpy(cas, p, 8);\n }\n p += 8; // cas\n } else {\n if (cas) {\n *cas = 0;\n }\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n if (key) {\n *key = (char*)p;\n *keylen = x;\n if ((hdr>>3)&1) {\n *keylen = unsixpack(*key, (int)*keylen, buf);\n *key = buf;\n }\n }\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n if (val) {\n *val = (char*)p;\n *vallen = x;\n }\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\nstatic size_t entry_memsize(const struct entry *entry,\n struct pgctx *ctx)\n{\n const uint8_t *p = entry_data(entry);\n uint8_t hdr = *(p++); // hdr\n p += sizeof(etime_t); // time\n if ((hdr>>0)&1) {\n p += 8; // expires\n }\n if ((hdr>>1)&1) {\n p 
+= 4; // flags\n }\n if (ctx->usecas) {\n p += 8; // cas\n }\n uint64_t x;\n p += varint_read_u64(p, 10, &x); // keylen\n p += x; // key\n p += varint_read_u64(p, 10, &x); // vallen\n p += x; // val\n return entry_struct_size()+(p-(uint8_t*)entry);\n}\n\n// The 'cas' param should always be set to zero unless loading from disk. \n// Setting to zero will set a new unique cas to the entry.\nstatic struct entry *entry_new(const char *key, size_t keylen, const char *val,\n size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,\n struct pgctx *ctx)\n{\n bool usesixpack = !ctx->nosixpack;\n#ifdef DBGCHECKENTRY\n // printf(\"entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, \"\n // \"expires=%\" PRId64 \", flags=%\" PRId32 \", cas=%\" PRIu64 \", \"\n // \"usesixpack=%d\\n\", (int)keylen, key, keylen, (int)vallen, key, vallen,\n // expires, flags, cas, usesixpack);\n int64_t oexpires = expires;\n uint32_t oflags = flags;\n uint64_t ocas = cas;\n const char *okey = key;\n size_t okeylen = keylen;\n const char *oval = val;\n size_t ovallen = vallen;\n#endif\n uint8_t hdr = 0;\n uint8_t keylenbuf[10];\n uint8_t vallenbuf[10];\n int nexplen, nflagslen, ncaslen, nkeylen, nvallen;\n if (expires > 0) {\n hdr |= 1;\n nexplen = 8;\n } else {\n nexplen = 0;\n }\n if (flags > 0) {\n hdr |= 2;\n nflagslen = 4;\n } else {\n nflagslen = 0;\n }\n if (ctx->usecas) {\n hdr |= 4;\n ncaslen = 8;\n } else {\n ncaslen = 0;\n }\n char buf[128];\n if (usesixpack && keylen <= 128) {\n size_t len = sixpack(key, keylen, buf);\n if (len > 0) {\n hdr |= 8;\n keylen = len;\n key = buf;\n }\n }\n nkeylen = varint_write_u64(keylenbuf, keylen);\n nvallen = varint_write_u64(vallenbuf, vallen);\n struct entry *entry_out = 0;\n size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+\n ncaslen+nkeylen+keylen+nvallen+vallen;\n // printf(\"malloc=%p size=%zu, ctx=%p\\n\", ctx->malloc, size, ctx);\n void *mem = ctx->malloc(size);\n struct entry *entry = mem;\n if (!entry) {\n return 
0;\n }\n uint8_t *p = (void*)entry_data(entry);\n *(p++) = hdr;\n memset(p, 0, sizeof(etime_t));\n p += sizeof(etime_t); // time\n if (nexplen > 0) {\n memcpy(p, &expires, nexplen);\n p += nexplen;\n }\n if (nflagslen > 0) {\n memcpy(p, &flags, nflagslen);\n p += nflagslen;\n }\n if (ncaslen > 0) {\n memcpy(p, &cas, ncaslen);\n p += ncaslen;\n }\n memcpy(p, keylenbuf, nkeylen);\n p += nkeylen;\n memcpy(p, key, keylen);\n p += keylen;\n memcpy(p, vallenbuf, nvallen);\n p += nvallen;\n memcpy(p, val, vallen);\n p += vallen;\n entry_out = entry;\n#ifdef DBGCHECKENTRY\n // check the key\n const char *key2, *val2;\n size_t keylen2, vallen2;\n int64_t expires2;\n uint32_t flags2;\n uint64_t cas2;\n char buf1[256];\n entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,\n &flags2, &cas2, ctx);\n assert(expires2 == oexpires);\n assert(flags2 == oflags);\n assert(cas2 == ocas);\n assert(keylen2 == okeylen);\n assert(memcmp(key2, okey, okeylen) == 0);\n assert(vallen2 == ovallen);\n assert(memcmp(val2, oval, ovallen) == 0);\n#endif\n return entry_out;\n}\n\nstatic void entry_free(struct entry *entry, struct pgctx *ctx) {\n ctx->free(entry);\n}\n\nstatic int entry_compare(const struct entry *a, const struct entry *b) {\n size_t akeylen, bkeylen;\n char buf1[256], buf2[256];\n const char *akey;\n const char *bkey;\n if (entry_sixpacked(a) == entry_sixpacked(b)) {\n akey = entry_rawkey(a, &akeylen);\n bkey = entry_rawkey(b, &bkeylen);\n } else {\n akey = entry_key(a, &akeylen, buf1);\n bkey = entry_key(b, &bkeylen, buf2);\n }\n size_t size = akeylen < bkeylen ? akeylen : bkeylen;\n int cmp = memcmp(akey, bkey, size);\n if (cmp == 0) {\n cmp = akeylen < bkeylen ? 
-1 : akeylen > bkeylen;\n }\n return cmp;\n}\n\n#ifndef HASHSIZE\n#define HASHSIZE 3\n#endif\n#if HASHSIZE < 1 || HASHSIZE > 4\n#error bad hash size\n#endif\n\nstruct bucket {\n uint8_t entry[PTRSIZE]; // 48-bit pointer\n uint8_t hash[HASHSIZE]; // 24-bit hash\n uint8_t dib; // distance to bucket\n};\n\nstatic_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, \"bad bucket size\");\n\nstruct map {\n int cap; // initial capacity\n int nbuckets; // number of buckets\n int count; // current entry count\n int mask; // bit mask for \n int growat;\n int shrinkat;\n struct bucket *buckets;\n uint64_t total; // current entry count\n size_t entsize; // memory size of all entries\n \n};\n\nstruct shard {\n atomic_uintptr_t lock; // spinlock (batch pointer)\n uint64_t cas; // compare and store value\n int64_t cleartime; // last clear time\n int clearcount; // number of items cleared\n struct map map; // robinhood hashmap\n // for batch linked list only\n struct shard *next;\n};\n\nstatic void lock_init(struct shard *shard) {\n atomic_init(&shard->lock, 0);\n}\n\nstruct batch {\n struct pogocache *cache; // associated cache.\n struct shard *shard; // first locked shard\n int64_t time; // timestamp\n};\n\nstruct pogocache {\n bool isbatch; \n union {\n struct pgctx ctx;\n struct batch batch;\n };\n struct shard shards[];\n};\n\nstatic struct entry *get_entry(struct bucket *bucket) {\n return load_ptr(bucket->entry);\n}\n\nstatic void set_entry(struct bucket *bucket, struct entry *entry) {\n store_ptr(bucket->entry, entry);\n}\n\n#if HASHSIZE == 1\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFF;\n}\nstatic void write_hash(uint8_t data[1], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[1]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n return hash;\n}\n#elif HASHSIZE == 2\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFF;\n}\nstatic void write_hash(uint8_t data[2], uint32_t hash) {\n data[0] = 
(hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[2]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n return hash;\n}\n#elif HASHSIZE == 3\nstatic uint32_t clip_hash(uint32_t hash) {\n return hash&0xFFFFFF;\n}\nstatic void write_hash(uint8_t data[3], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[3]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n return hash;\n}\n#else \nstatic uint32_t clip_hash(uint32_t hash) {\n return hash;\n}\nstatic void write_hash(uint8_t data[4], uint32_t hash) {\n data[0] = (hash>>0)&0xFF;\n data[1] = (hash>>8)&0xFF;\n data[2] = (hash>>16)&0xFF;\n data[3] = (hash>>24)&0xFF;\n}\n\nstatic uint32_t read_hash(uint8_t data[4]) {\n uint32_t hash = 0;\n hash |= ((uint64_t)data[0])<<0;\n hash |= ((uint64_t)data[1])<<8;\n hash |= ((uint64_t)data[2])<<16;\n hash |= ((uint64_t)data[3])<<24;\n return hash;\n}\n#endif\n\nstatic uint32_t get_hash(struct bucket *bucket) {\n return read_hash(bucket->hash);\n}\n\nstatic void set_hash(struct bucket *bucket, uint32_t hash) {\n write_hash(bucket->hash, hash);\n}\n\nstatic uint8_t get_dib(struct bucket *bucket) {\n return bucket->dib;\n}\n\nstatic void set_dib(struct bucket *bucket, uint8_t dib) {\n bucket->dib = dib;\n}\n\nstatic bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {\n map->cap = cap;\n map->nbuckets = cap;\n map->count = 0;\n map->mask = map->nbuckets-1;\n map->growat = map->nbuckets * ctx->loadfactor;\n map->shrinkat = map->nbuckets * ctx->shrinkfactor;\n size_t size = sizeof(struct bucket)*map->nbuckets;\n map->buckets = ctx->malloc(size);\n if (!map->buckets) {\n // nomem\n memset(map, 0, sizeof(struct map));\n return false;\n }\n memset(map->buckets, 0, size);\n return true;\n}\n\nstatic bool resize(struct map *map, 
size_t new_cap, struct pgctx *ctx) {\n struct map map2;\n if (!map_init(&map2, new_cap, ctx)) {\n return false;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket ebkt = map->buckets[i];\n if (get_dib(&ebkt)) {\n set_dib(&ebkt, 1);\n size_t j = get_hash(&ebkt) & map2.mask;\n while (1) {\n if (get_dib(&map2.buckets[j]) == 0) {\n map2.buckets[j] = ebkt;\n break;\n }\n if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {\n struct bucket tmp = map2.buckets[j];\n map2.buckets[j] = ebkt;\n ebkt = tmp;\n }\n j = (j + 1) & map2.mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n }\n }\n int org_cap = map->cap;\n int org_count = map->count;\n ctx->free(map->buckets);\n memcpy(map, &map2, sizeof(struct map));\n map->cap = org_cap;\n map->count = org_count;\n return true;\n}\n\nstatic bool map_insert(struct map *map, struct entry *entry, uint32_t hash,\n struct entry **old, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n if (map->count >= map->growat) {\n if (!resize(map, map->nbuckets*2, ctx)) {\n *old = 0;\n return false;\n }\n }\n map->entsize += entry_memsize(entry, ctx);\n struct bucket ebkt;\n set_entry(&ebkt, entry);\n set_hash(&ebkt, hash);\n set_dib(&ebkt, 1);\n size_t i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n // new entry\n map->buckets[i] = ebkt;\n map->count++;\n map->total++;\n *old = 0;\n return true;\n }\n if (get_hash(&ebkt) == get_hash(&map->buckets[i]) && \n entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)\n {\n // replaced\n *old = get_entry(&map->buckets[i]);\n map->entsize -= entry_memsize(*old, ctx);\n set_entry(&map->buckets[i], get_entry(&ebkt));\n return true;\n }\n if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {\n struct bucket tmp = map->buckets[i];\n map->buckets[i] = ebkt;\n ebkt = tmp;\n }\n i = (i + 1) & map->mask;\n set_dib(&ebkt, get_dib(&ebkt)+1);\n }\n}\n\nstatic bool bucket_eq(struct map *map, size_t i, const char *key,\n size_t keylen, uint32_t hash)\n{\n if 
(get_hash(&map->buckets[i]) != hash) {\n return false;\n }\n size_t keylen2;\n char buf[128];\n const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);\n return keylen == keylen2 && memcmp(key, key2, keylen) == 0;\n}\n\n// Returns the bucket index for key, or -1 if not found.\nstatic int map_get_bucket(struct map *map, const char *key, size_t keylen,\n uint32_t hash)\n{\n hash = clip_hash(hash);\n size_t i = hash & map->mask;\n while (1) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n return -1;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return i;\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic struct entry *map_get_entry(struct map *map, const char *key,\n size_t keylen, uint32_t hash, int *bkt_idx_out)\n{\n int i = map_get_bucket(map, key, keylen, hash);\n *bkt_idx_out = i;\n return i >= 0 ? get_entry(&map->buckets[i]) : 0;\n}\n\n// This deletes entry from bucket and adjusts the dibs buckets to right, if\n// needed.\nstatic void delbkt(struct map *map, size_t i) {\n set_dib(&map->buckets[i], 0);\n while (1) {\n size_t h = i;\n i = (i + 1) & map->mask;\n if (get_dib(&map->buckets[i]) <= 1) {\n set_dib(&map->buckets[h], 0);\n break;\n }\n map->buckets[h] = map->buckets[i];\n set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);\n }\n map->count--;\n}\n\nstatic bool needsshrink(struct map *map, struct pgctx *ctx) {\n return ctx->allowshrink && map->nbuckets > map->cap && \n map->count <= map->shrinkat;\n}\n\n// Try to shrink the hashmap. 
If needed, this will allocate a new hashmap that\n// has fewer buckets and move all existing entries into the smaller map.\n// The 'multi' param is a hint that multi entries may have been deleted, such\n// as with the iter or clear operations.\n// If the resize fails due to an allocation error then the existing hashmap\n// will be retained.\nstatic void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {\n if (!needsshrink(map, ctx)) {\n return;\n }\n int cap;\n if (multi) {\n // Determine how many buckets are needed to store all entries.\n cap = map->cap;\n int growat = cap * ctx->loadfactor;\n while (map->count >= growat) {\n cap *= 2;\n growat = cap * ctx->loadfactor;\n }\n } else {\n // Just half the buckets\n cap = map->nbuckets / 2;\n }\n resize(map, cap, ctx);\n}\n\n// delete an entry at bucket position. not called directly\nstatic struct entry *delentry_at_bkt(struct map *map, size_t i, \n struct pgctx *ctx)\n{\n struct entry *old = get_entry(&map->buckets[i]);\n assert(old);\n map->entsize -= entry_memsize(old, ctx);\n delbkt(map, i);\n return old;\n}\n\nstatic struct entry *map_delete(struct map *map, const char *key,\n size_t keylen, uint32_t hash, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n int i = hash & map->mask;\n while (1) {\n if (get_dib(&map->buckets[i]) == 0) {\n return 0;\n }\n if (bucket_eq(map, i, key, keylen, hash)) {\n return delentry_at_bkt(map, i, ctx);\n }\n i = (i + 1) & map->mask;\n }\n}\n\nstatic size_t evict_entry(struct shard *shard, int shardidx, \n struct entry *entry, int64_t now, int reason, struct pgctx *ctx)\n{\n char buf[128];\n size_t keylen;\n const char *key = entry_key(entry, &keylen, buf);\n uint32_t hash = th64(key, keylen, ctx->seed);\n struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(del == entry); (void)del;\n if (ctx->evicted) {\n // Notify user that an entry was evicted.\n const char *val;\n size_t vallen;\n int64_t expires = 0;\n uint32_t flags = 0;\n uint64_t cas = 0;\n 
entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val,\n vallen, expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n size_t size = entry_memsize(entry, ctx);\n entry_free(entry, ctx);\n return size;\n}\n\n// evict an entry using the 2-random algorithm.\n// Pick two random entries and delete the one with the oldest access time.\n// Do not evict the entry if it matches the provided hash.\nstatic void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,\n int64_t now, struct pgctx *ctx)\n{\n hash = clip_hash(hash);\n struct map *map = &shard->map;\n struct entry *entries[2];\n int count = 0;\n for (int i = 1; i < map->nbuckets && count < 2; i++) {\n size_t j = (i+hash)&(map->nbuckets-1);\n struct bucket *bkt = &map->buckets[j];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry has expired. 
Evict this one instead.\n evict_entry(shard, shardidx, entry, now, reason, ctx);\n return;\n }\n if (get_hash(bkt) == hash) {\n continue;\n }\n entries[count++] = entry;\n }\n int choose;\n if (count == 1) {\n choose = 0;\n } else if (count == 2) {\n // We now have two candidates.\n if (entry_time(entries[0]) < entry_time(entries[1])) {\n choose = 0;\n } else {\n choose = 1;\n }\n } else {\n return;\n }\n evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,\n ctx);\n}\n\nstatic void shard_deinit(struct shard *shard, struct pgctx *ctx) {\n struct map *map = &shard->map;\n if (!map->buckets) {\n return;\n }\n for (int i = 0; i < map->nbuckets; i++) {\n struct bucket *bkt = &map->buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n entry_free(entry, ctx);\n }\n ctx->free(map->buckets);\n}\n\nstatic bool shard_init(struct shard *shard, struct pgctx *ctx) {\n memset(shard, 0, sizeof(struct shard));\n lock_init(shard);\n shard->cas = 1;\n if (!map_init(&shard->map, INITCAP, ctx)) {\n // nomem\n shard_deinit(shard, ctx);\n return false;\n }\n return true;\n}\n\n/// Free all cache and shard hashmap allocations.\n/// This does not access the value data in any of the entries. If it is needed\n/// for the further cleanup at an entry value level, then use the\n/// pogocache_iter to perform the cleanup on each entry before calling this\n/// operation.\n/// Also this is not threadsafe. 
Make sure that other threads are not\n/// currently using the cache concurrently nor after this function is called.\nvoid pogocache_free(struct pogocache *cache) {\n if (!cache) {\n return;\n }\n struct pgctx *ctx = &cache->ctx;\n for (int i = 0; i < cache->ctx.nshards; i++) {\n shard_deinit(&cache->shards[i], ctx);\n }\n cache->ctx.free(cache);\n}\n\nstatic void opts_to_ctx(int nshards, struct pogocache_opts *opts,\n struct pgctx *ctx)\n{\n ctx->nshards = nshards;\n int loadfactor = 0;\n if (opts) {\n ctx->yield = opts->yield;\n ctx->evicted = opts->evicted;\n ctx->udata = opts->udata;\n ctx->usecas = opts->usecas;\n ctx->nosixpack = opts->nosixpack;\n ctx->noevict = opts->noevict;\n ctx->seed = opts->seed;\n loadfactor = opts->loadfactor;\n ctx->allowshrink = opts->allowshrink;\n ctx->usethreadbatch = opts->usethreadbatch;\n }\n // make loadfactor a floating point\n loadfactor = loadfactor == 0 ? DEFLOADFACTOR :\n loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :\n loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :\n loadfactor;\n ctx->loadfactor = ((double)loadfactor/100.0);\n ctx->shrinkfactor = ((double)SHRINKAT/100.0);\n}\n\nstatic struct pogocache_opts newdefopts = { 0 };\n\n/// Returns a new cache or null if there is not enough memory available.\n/// See 'pogocache_opts' for all options.\nstruct pogocache *pogocache_new(struct pogocache_opts *opts) {\n if (!opts) {\n opts = &newdefopts;\n }\n void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;\n void (*_free)(void*) = opts->free ? opts->free : free;\n int shards = !opts || opts->nshards <= 0 ? 
DEFSHARDS : opts->nshards;\n size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);\n struct pogocache *cache = _malloc(size);\n if (!cache) {\n return 0;\n }\n memset(cache, 0, sizeof(struct pogocache));\n struct pgctx *ctx = &cache->ctx;\n opts_to_ctx(shards, opts, ctx);\n ctx->malloc = _malloc;\n ctx->free = _free;\n for (int i = 0; i < ctx->nshards; i++) {\n if (!shard_init(&cache->shards[i], ctx)) {\n // nomem\n pogocache_free(cache);\n return 0;\n }\n }\n return cache;\n}\n\nstatic int shard_index(struct pogocache *cache, uint64_t hash) {\n return (hash>>32)%cache->ctx.nshards;\n}\n\nstatic struct shard *shard_get(struct pogocache *cache, int index) {\n return &cache->shards[index];\n}\n\n/// Returns a timestamp.\nint64_t pogocache_now(void) {\n return getnow();\n}\n\nstatic __thread struct pogocache thbatch;\n\nstruct pogocache *pogocache_begin(struct pogocache *cache) {\n struct pogocache *batch;\n if (cache->ctx.usethreadbatch) {\n batch = &thbatch;\n } else {\n batch = cache->ctx.malloc(sizeof(struct pogocache));\n if (!batch) {\n return 0;\n }\n }\n batch->isbatch = true;\n batch->batch.cache = cache;\n batch->batch.shard = 0;\n batch->batch.time = 0;\n return batch;\n}\n\nvoid pogocache_end(struct pogocache *batch) {\n assert(batch->isbatch);\n struct shard *shard = batch->batch.shard;\n while (shard) {\n struct shard *next = shard->next;\n shard->next = 0;\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);\n shard = next;\n }\n if (!batch->batch.cache->ctx.usethreadbatch) {\n batch->batch.cache->ctx.free(batch);\n }\n}\n\nstatic void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {\n if (batch) {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n shard->next = batch->shard;\n batch->shard = shard;\n break;\n }\n if (val == (uintptr_t)(void*)batch) {\n break;\n }\n if (ctx->yield) {\n 
ctx->yield(ctx->udata);\n }\n }\n } else {\n while (1) {\n uintptr_t val = 0;\n if (atomic_compare_exchange_weak_explicit(&shard->lock, &val, \n UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n {\n break;\n }\n if (ctx->yield) {\n ctx->yield(ctx->udata);\n }\n }\n }\n}\n\nstatic bool acquire_for_scan(int shardidx, struct shard **shard_out, \n struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *shard_out = shard;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// acquire a lock for the key\nstatic bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,\n struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)\n{\n struct pogocache *cache = *cache_inout;\n struct batch *batch = 0;\n if (cache->isbatch) {\n // use batch\n batch = &cache->batch;\n cache = batch->cache;\n }\n struct pgctx *ctx = &cache->ctx;\n uint64_t fhash = th64(key, keylen, cache->ctx.seed);\n int shardidx = shard_index(cache, fhash);\n struct shard *shard = shard_get(cache, shardidx);\n lock(batch, shard, ctx);\n *hash_out = fhash;\n *shard_out = shard;\n *shardidx_out = shardidx;\n *cache_inout = cache;\n return batch != 0;\n}\n\n// Acquire a lock on the shard for key and execute the provided operation.\n#define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \\\n int shardidx; \\\n uint32_t hash; \\\n struct shard *shard; \\\n bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \\\n &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)shardidx, (void)hash, (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\n// Acquire a lock on the shard at index and execute the 
provided operation.\n#define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \\\n struct shard *shard; \\\n bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \\\n struct pgctx *ctx = &cache->ctx; \\\n (void)ctx; \\\n rettype status = op; \\\n if (!usebatch) { \\\n atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \\\n } \\\n status; \\\n})\n\nstatic int loadop(const void *key, size_t keylen, \n struct pogocache_load_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defloadopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n // Get the entry bucket index for the entry with key.\n int bidx = map_get_bucket(&shard->map, key, keylen, hash);\n if (bidx == -1) {\n return POGOCACHE_NOTFOUND;\n }\n // Extract the bucket, entry, and values.\n struct bucket *bkt = &shard->map.buckets[bidx];\n struct entry *entry = get_entry(bkt);\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. 
Evict the entry and clear the bucket.\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(entry, ctx);\n delbkt(&shard->map, bidx);\n return POGOCACHE_NOTFOUND;\n }\n if (!opts->notouch) {\n entry_settime(entry, now);\n }\n if (opts->entry) {\n struct pogocache_update *update = 0;\n opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,\n cas, &update, opts->udata);\n if (update) {\n // User wants to update the entry.\n shard->cas++;\n struct entry *entry2 = entry_new(key, keylen, update->value,\n update->valuelen, update->expires, update->flags, shard->cas, \n ctx);\n if (!entry2) {\n return POGOCACHE_NOMEM;\n }\n entry_settime(entry2, now);\n set_entry(bkt, entry2);\n entry_free(entry, ctx);\n }\n }\n return POGOCACHE_FOUND;\n}\n\n/// Loads an entry from the cache.\n/// Use the pogocache_load_opts.entry callback to access the value of the entry.\n/// It's possible to update the value using the 'update' param in the callback.\n/// See 'pogocache_load_opts' for all options.\n/// @returns POGOCACHE_FOUND when the entry was found.\n/// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\nint pogocache_load(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_load_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen, \n loadop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int deleteop(const void *key, size_t keylen, \n struct pogocache_delete_opts *opts, struct shard *shard, int shardidx, \n uint32_t hash, struct pgctx *ctx)\n{\n opts = opts ? opts : &defdeleteopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);\n if (!entry) {\n // Entry does not exist\n return POGOCACHE_NOTFOUND;\n }\n const char *val;\n size_t vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n // Entry is no longer alive. It was already deleted from the map but\n // we still need to notify the user.\n if (ctx->evicted) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (opts->entry) {\n entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen,\n expires, flags, cas, opts->udata))\n {\n // User canceled the delete. 
Put it back into the map.\n // This insert will not cause an allocation error because the \n // previous delete operation left us with at least one available\n // bucket.\n struct entry *old;\n bool ok = map_insert(&shard->map, entry, hash, &old, ctx);\n assert(ok); (void)ok;\n assert(!old);\n return POGOCACHE_CANCELED;\n }\n }\n // Entry was successfully deleted.\n tryshrink(&shard->map, false, ctx);\n entry_free(entry, ctx);\n return POGOCACHE_DELETED;\n}\n\n/// Deletes an entry from the cache.\n/// See 'pogocache_delete_opts' for all options.\n/// @returns POGOCACHE_DELETED when the entry was successfully deleted.\n/// @returns POGOCACHE_NOTFOUND when the entry was not found.\n/// @returns POGOCACHE_CANCELED when opts.entry callback returned false.\nint pogocache_delete(struct pogocache *cache, const void *key, size_t keylen, \n struct pogocache_delete_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n deleteop(key, keylen, opts, shard, shardidx, hash, ctx)\n );\n}\n\nstatic int storeop(const void *key, size_t keylen, const void *val,\n size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,\n int shardidx, uint32_t hash, struct pgctx *ctx)\n{\n int count = shard->map.count;\n opts = opts ? opts : &defstoreopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int64_t expires = 0;\n if (opts->expires > 0) {\n expires = opts->expires;\n } else if (opts->ttl > 0) {\n expires = int64_add_clamp(now, opts->ttl);\n }\n if (opts->keepttl) {\n // User wants to keep the existing ttl. 
Get the existing entry from the\n // map first and take its expiration.\n int i;\n struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason == 0) {\n expires = entry_expires(old);\n }\n }\n }\n shard->cas++;\n struct entry *entry = entry_new(key, keylen, val, vallen, expires,\n opts->flags, shard->cas, ctx);\n if (!entry) {\n goto nomem;\n }\n entry_settime(entry, now);\n if (opts->lowmem && ctx->noevict) {\n goto nomem;\n }\n // Insert new entry into map\n struct entry *old;\n if (!map_insert(&shard->map, entry, hash, &old, ctx)) {\n goto nomem;\n }\n if (old) {\n int reason = entry_alive(old, now, shard->cleartime);\n if (reason) {\n // There's an old entry, but it's no longer alive.\n // Treat this like an eviction and notify the user.\n if (ctx->evicted) {\n const char *oval;\n size_t ovallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0,\n &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);\n ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,\n oexpires, oflags, ocas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n entry_free(old, ctx);\n old = 0;\n }\n }\n int put_back_status = 0;\n if (old) {\n if (opts->casop) {\n // User is requesting the cas operation.\n if (ctx->usecas) {\n uint64_t old_cas = entry_cas(old);\n if (opts->cas != old_cas) {\n // CAS test failed.\n // printf(\". cas failed: expected %\" PRIu64 \", \"\n // \"got %\" PRIu64 \"\\n\", cas, old_cas);\n put_back_status = POGOCACHE_FOUND;\n }\n } else {\n put_back_status = POGOCACHE_FOUND;\n }\n } else if (opts->nx) {\n put_back_status = POGOCACHE_FOUND;\n }\n if (put_back_status) {\n put_back:;\n // The entry needs be put back into the map and operation must\n // return early.\n // This insert operation must not fail since the entry 'e' and\n // 'old' both exist and will always be bucket swapped. 
There will\n // never be a new allocation.\n struct entry *e = 0;\n bool ok = map_insert(&shard->map, old, hash, &e, ctx);\n assert(ok); (void)ok;\n assert(e == entry);\n entry_free(entry, ctx);\n return put_back_status;\n }\n } else if (opts->xx || opts->casop) {\n // The new entry must not be inserted.\n // Delete it and return early.\n struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);\n assert(e == entry); (void)e;\n entry_free(entry, ctx);\n return POGOCACHE_NOTFOUND;\n }\n if (old && opts->entry) {\n // User is requesting to verify the old entry before allowing it to be\n // replaced by the new entry.\n const char *val;\n size_t vallen;\n int64_t oexpires = 0;\n uint32_t oflags = 0;\n uint64_t ocas = 0;\n entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,\n ctx);\n if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,\n oflags, ocas, opts->udata))\n {\n // User wants to keep the old entry.\n put_back_status = POGOCACHE_CANCELED;\n goto put_back;\n }\n }\n // The new entry was inserted.\n if (old) {\n entry_free(old, ctx);\n return POGOCACHE_REPLACED;\n } else {\n if (opts->lowmem && shard->map.count > count) {\n // The map grew by one bucket, yet the user indicates that there is\n // a low memory event. Evict one entry.\n auto_evict_entry(shard, shardidx, hash, now, ctx);\n }\n return POGOCACHE_INSERTED;\n }\nnomem:\n entry_free(entry, ctx);\n return POGOCACHE_NOMEM;\n}\n\n/// Insert or replace an entry in the cache.\n/// If an entry with the same key already exists then the cache then the \n/// the opts.entry callback can be used to check the existing\n/// value first, allowing the operation to be canceled.\n/// See 'pogocache_store_opts' for all options.\n/// @returns POGOCACHE_INSERTED when the entry was inserted.\n/// @returns POGOCACHE_REPLACED when the entry replaced an existing one.\n/// @returns POGOCACHE_FOUND when the entry already exists. 
(cas/nx)\n/// @returns POGOCACHE_CANCELED when the operation was canceled.\n/// @returns POGOCACHE_NOMEM when there is system memory available.\nint pogocache_store(struct pogocache *cache, const void *key, size_t keylen, \n const void *val, size_t vallen, struct pogocache_store_opts *opts)\n{\n return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,\n storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)\n );\n}\n\n\nstatic struct pogocache *rootcache(struct pogocache *cache) {\n return cache->isbatch ? cache->batch.cache : cache;\n}\n\n/// Returns the number of shards in cache\nint pogocache_nshards(struct pogocache *cache) {\n cache = rootcache(cache);\n return cache->ctx.nshards;\n}\n\nstatic int iterop(struct shard *shard, int shardidx, int64_t now,\n struct pogocache_iter_opts *opts, struct pgctx *ctx)\n{\n char buf[128];\n int status = POGOCACHE_FINISHED;\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen,\n &expires, &flags, &cas, ctx);\n int reason = entry_alive(entry, now, shard->cleartime);\n if (reason) {\n#ifdef EVICTONITER\n if (ctx->evicted) {\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n // Delete entry at bucket.\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n i--;\n#endif\n } else {\n // Entry is alive, check with user for next action.\n int action = POGOCACHE_ITER_CONTINUE;\n if (opts->entry) {\n action = opts->entry(shardidx, now, key, keylen, val,\n vallen, expires, flags, cas, opts->udata);\n }\n if (action != POGOCACHE_ITER_CONTINUE) {\n if (action&POGOCACHE_ITER_DELETE) {\n // Delete entry at bucket\n delbkt(&shard->map, 
i);\n entry_free(entry, ctx);\n i--;\n }\n if (action&POGOCACHE_ITER_STOP) {\n status = POGOCACHE_CANCELED;\n break;\n }\n }\n }\n }\n tryshrink(&shard->map, true, ctx);\n return status;\n}\n\n/// Iterate over entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The pogocache_iter_opts.entry callback can be used to perform actions such\n/// as: deleting entries and stopping iteration early. \n/// See 'pogocache_iter_opts' for all options.\n/// @return POGOCACHE_FINISHED if iteration completed\n/// @return POGOCACHE_CANCELED if iteration stopped early\nint pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defiteropts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return POGOCACHE_FINISHED;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)\n );\n }\n for (int i = 0; i < nshards; i++) {\n int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n iterop(shard, i, now, opts, &cache->ctx)\n );\n if (status != POGOCACHE_FINISHED) {\n return status;\n }\n }\n return POGOCACHE_FINISHED;\n}\n\nstatic size_t countop(struct shard *shard) {\n return shard->map.count - shard->clearcount;\n}\n\n/// Returns the number of entries in the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_count(struct pogocache *cache,\n struct pogocache_count_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defcountopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n countop(shard);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n countop(shard);\n );\n }\n return count;\n}\n\nstatic uint64_t totalop(struct shard *shard) {\n return shard->map.total;\n}\n\n/// Returns the total number of entries that have ever been stored in the cache.\n/// For the current number of entries use pogocache_count().\n/// There's an option to allow for isolating the operation to a single shard.\nuint64_t pogocache_total(struct pogocache *cache,\n struct pogocache_total_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &deftotalopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,\n totalop(shard);\n );\n }\n uint64_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,\n totalop(shard);\n );\n }\n return count;\n}\n\nstatic size_t sizeop(struct shard *shard, bool entriesonly) {\n size_t size = 0;\n if (!entriesonly) {\n size += sizeof(struct shard);\n size += sizeof(struct bucket)*shard->map.nbuckets;\n }\n size += shard->map.entsize;\n return size;\n}\n\n/// Returns the total memory size of the shard.\n/// This includes the memory size of all data structures and entries.\n/// Use the entriesonly option to limit the result to only the entries.\n/// There's an option to allow for isolating the operation to a single shard.\nsize_t pogocache_size(struct pogocache *cache,\n struct pogocache_size_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? 
opts : &defsizeopts;\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return 0;\n }\n return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,\n sizeop(shard, opts->entriesonly);\n );\n }\n size_t count = 0;\n for (int i = 0; i < nshards; i++) {\n count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,\n sizeop(shard, opts->entriesonly);\n );\n }\n return count;\n}\n\n\n\nstatic int sweepop(struct shard *shard, int shardidx, int64_t now,\n size_t *swept, size_t *kept, struct pgctx *ctx)\n{\n char buf[128];\n for (int i = 0; i < shard->map.nbuckets; i++) {\n struct bucket *bkt = &shard->map.buckets[i];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n int64_t expires = entry_expires(entry);\n int64_t etime = entry_time(entry);\n int reason = entry_alive_exp(expires, etime, now, shard->cleartime);\n if (reason == 0) {\n // entry is still alive\n (*kept)++;\n continue;\n }\n // entry is no longer alive.\n if (ctx->evicted) {\n const char *key, *val;\n size_t keylen, vallen;\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,\n &flags, &cas, ctx);\n // Report eviction to user\n ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,\n expires, flags, cas, ctx->udata);\n }\n shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);\n delbkt(&shard->map, i);\n entry_free(entry, ctx);\n (*swept)++;\n // Entry was deleted from bucket, which may move entries to the right\n // over one bucket to the left. 
So we need to check the same bucket\n // again.\n i--;\n }\n tryshrink(&shard->map, true, ctx);\n return 0;\n}\n\n/// Remove expired entries from the cache.\n/// There's an option to allow for isolating the operation to a single shard.\n/// The final 'kept' or 'swept' counts are returned.\n/// @return POGOCACHE_FINISHED when iteration completed\n/// @return POGOCACHE_CANCELED when iteration stopped early\nvoid pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept, \n struct pogocache_sweep_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweepopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n size_t sweptc = 0;\n size_t keptc = 0;\n if (opts->oneshard) {\n if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,\n &cache->ctx);\n );\n }\n } else {\n for (int i = 0; i < nshards; i++) {\n size_t sweptc2 = 0;\n size_t keptc2 = 0;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);\n );\n sweptc += sweptc2;\n keptc += keptc2;\n }\n }\n if (swept) {\n *swept = sweptc;\n }\n if (kept) {\n *kept = keptc;\n }\n}\n\nstatic int clearop(struct shard *shard, int shardidx, int64_t now, \n struct pgctx *ctx)\n{\n (void)shardidx, (void)ctx;\n shard->cleartime = now;\n shard->clearcount += (shard->map.count-shard->clearcount);\n return 0;\n}\n\n/// Clear the cache.\n/// There's an option to allow for isolating the operation to a single shard.\nvoid pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defclearopts;\n int64_t now = opts->time > 0 ? 
opts->time : getnow();\n if (opts->oneshard) {\n if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {\n return;\n }\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,\n clearop(shard, opts->oneshardidx, now, &cache->ctx);\n );\n return;\n }\n for (int i = 0; i < cache->ctx.nshards; i++) {\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,\n clearop(shard, i, now, &cache->ctx);\n );\n }\n}\n\nstatic int sweeppollop(struct shard *shard, int shardidx, int64_t now, \n int pollsize, double *percent)\n{\n // start at random bucket\n int count = 0;\n int dead = 0;\n int bidx = mix13(now+shardidx)%shard->map.nbuckets;\n for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {\n struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];\n if (get_dib(bkt) == 0) {\n continue;\n }\n struct entry *entry = get_entry(bkt);\n count++;\n dead += (entry_alive(entry, now, shard->cleartime) != 0);\n }\n if (count == 0) {\n *percent = 0;\n return 0;\n }\n *percent = (double)dead/(double)count;\n return 0;\n}\n\ndouble pogocache_sweep_poll(struct pogocache *cache, \n struct pogocache_sweep_poll_opts *opts)\n{\n int nshards = pogocache_nshards(cache);\n opts = opts ? opts : &defsweeppollopts;\n int64_t now = opts->time > 0 ? opts->time : getnow();\n int pollsize = opts->pollsize == 0 ? 20 : opts->pollsize;\n \n // choose a random shard\n int shardidx = mix13(now)%nshards;\n double percent;\n ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,\n sweeppollop(shard, shardidx, now, pollsize, &percent);\n );\n return percent;\n}\n"], ["/pogocache/src/save.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit save.c provides an interface for saving and loading Pogocache\n// data files.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"pogocache.h\"\n#include \"buf.h\"\n#include \"util.h\"\n#include \"lz4.h\"\n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#define BLOCKSIZE 1048576\n#define COMPRESS\n\nextern struct pogocache *cache;\nextern const int verb;\n\nstruct savectx {\n pthread_t th; // work thread\n int index; // thread index\n pthread_mutex_t *lock; // write lock\n int fd; // work file descriptor\n int start; // current shard\n int count; // number of shards to process\n struct buf buf; // block buffer\n bool ok; // final ok\n int errnum; // final errno status\n struct buf dst; // compressed buffer space\n size_t nentries; // number of entried in block buffer\n};\n\nstatic int flush(struct savectx *ctx) {\n if (ctx->nentries == 0) {\n ctx->buf.len = 0;\n return 0;\n }\n // Make sure that there's enough space in the dst buffer to store the\n // header (16 bytes) and the compressed data.\n size_t bounds = LZ4_compressBound(ctx->buf.len);\n buf_ensure(&ctx->dst, 16+bounds);\n // Compress the block\n uint32_t len = LZ4_compress_default((char*)ctx->buf.data, \n (char*)ctx->dst.data+16, ctx->buf.len, bounds);\n // The block is now compressed.\n // Genreate a checksum of the compressed data.\n uint32_t crc = crc32(ctx->dst.data+16, len);\n // Write the 16 byte header\n // (0-3) 'POGO' tag\n memcpy(ctx->dst.data, \"POGO\", 4);\n // (4-7) Checksum\n write_u32(ctx->dst.data+4, crc);\n // (8-11) Len of decompressed data \n write_u32(ctx->dst.data+8, ctx->buf.len);\n // 
(12-15) Len of compressed data \n write_u32(ctx->dst.data+12, len);\n // The rest of the dst buffer contains the compressed bytes\n uint8_t *p = (uint8_t*)ctx->dst.data;\n uint8_t *end = p + len+16;\n bool ok = true;\n pthread_mutex_lock(ctx->lock);\n while (p < end) {\n ssize_t n = write(ctx->fd, p, end-p);\n if (n < 0) {\n ok = false;\n break;\n }\n p += n;\n }\n pthread_mutex_unlock(ctx->lock);\n ctx->buf.len = 0;\n ctx->nentries = 0;\n return ok ? 0 : -1;\n};\n\nstatic int save_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard;\n struct savectx *ctx = udata;\n buf_append_byte(&ctx->buf, 0); // entry type. zero=k/v string pair;\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n buf_append_uvarint(&ctx->buf, valuelen);\n buf_append(&ctx->buf, value, valuelen);\n if (expires > 0) {\n int64_t ttl = expires-time;\n assert(ttl > 0);\n buf_append_uvarint(&ctx->buf, ttl);\n } else {\n buf_append_uvarint(&ctx->buf, 0);\n }\n buf_append_uvarint(&ctx->buf, flags);\n buf_append_uvarint(&ctx->buf, cas);\n ctx->nentries++;\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void *thsave(void *arg) {\n struct savectx *ctx = arg;\n for (int i = 0; i < ctx->count; i++) {\n int shardidx = ctx->start+i;\n struct pogocache_iter_opts opts = {\n .oneshard = true,\n .oneshardidx = shardidx,\n .time = sys_now(),\n .entry = save_entry,\n .udata = ctx,\n };\n // write the unix timestamp before entries\n buf_append_uvarint(&ctx->buf, sys_unixnow());\n int status = pogocache_iter(cache, &opts);\n if (status == POGOCACHE_CANCELED) {\n goto done;\n }\n if (flush(ctx) == -1) {\n goto done;\n }\n }\n ctx->ok = true;\ndone:\n buf_clear(&ctx->buf);\n buf_clear(&ctx->dst);\n ctx->errnum = errno;\n return 0;\n}\n\nint save(const char *path, bool fast) {\n uint64_t seed = sys_seed();\n size_t psize = strlen(path)+32;\n char *workpath = 
xmalloc(psize);\n snprintf(workpath, psize, \"%s.%08x.pogocache.work\", path, \n (int)(seed%INT_MAX));\n if (verb > 1) {\n printf(\". Saving to work file %s\\n\", workpath);\n }\n int fd = open(workpath, O_RDWR|O_CREAT, S_IRUSR|S_IRGRP|S_IROTH);\n if (fd == -1) {\n return -1;\n }\n int nshards = pogocache_nshards(cache);\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n if (!fast) {\n nprocs = 1;\n }\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n struct savectx *ctxs = xmalloc(nprocs*sizeof(struct savectx));\n memset(ctxs, 0, nprocs*sizeof(struct savectx));\n bool ok = false;\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n ctx->index = i;\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->fd = fd;\n ctx->lock = &lock;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (nprocs > 1) {\n if (pthread_create(&ctx->th, 0, thsave, ctx) == -1) {\n ctx->th = 0;\n }\n }\n start += ctx->count;\n }\n // execute operations on failed threads (or fast=false)\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thsave(ctx);\n }\n }\n // wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n // check for any failures\n for (int i = 0; i < nprocs; i++) {\n struct savectx *ctx = &ctxs[i];\n if (!ctx->ok) {\n errno = ctx->errnum;\n goto done;\n }\n }\n // Move file work file to final path\n if (rename(workpath, path) == -1) {\n goto done;\n }\n ok = true;\ndone:\n close(fd);\n unlink(workpath);\n xfree(workpath);\n xfree(ctxs);\n return ok ? 
0 : -1;\n}\n\n// compressed block\nstruct cblock {\n struct buf cdata; // compressed data\n size_t dlen; // decompressed size\n};\n\nstruct loadctx {\n pthread_t th;\n\n // shared context\n pthread_mutex_t *lock;\n pthread_cond_t *cond;\n bool *donereading; // shared done flag\n int *nblocks; // number of blocks in queue\n struct cblock *blocks; // the block queue\n bool *failure; // a thread will set this upon error\n\n // thread status\n atomic_bool ok;\n int errnum;\n size_t ninserted;\n size_t nexpired;\n};\n\nstatic bool load_block(struct cblock *block, struct loadctx *ctx) {\n (void)ctx;\n bool ok = false;\n\n int64_t now = sys_now();\n int64_t unixnow = sys_unixnow();\n\n // decompress block\n char *ddata = xmalloc(block->dlen);\n int ret = LZ4_decompress_safe(block->cdata.data, ddata, block->cdata.len, \n block->dlen);\n if (ret < 0 || (size_t)ret != block->dlen) {\n printf(\". bad compressed block\\n\");\n goto done;\n }\n buf_clear(&block->cdata);\n uint8_t *p = (void*)ddata;\n uint8_t *e = p + block->dlen;\n\n int n;\n uint64_t x;\n // read unix time\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n printf(\". bad unix time\\n\");\n goto done;\n }\n p += n;\n\n int64_t unixtime = x;\n // printf(\". unixtime=%lld\\n\", unixtime);\n\n // Read each entry from decompressed data\n while (e > p) {\n /////////////////////\n // kind\n uint8_t kind = *(p++);\n \n if (kind != 0) {\n // only k/v strings allowed at this time.\n printf(\">> %d\\n\", kind);\n printf(\". 
unknown kind\\n\");\n goto done;\n }\n /////////////////////\n // key\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t keylen = x;\n if ((size_t)(e-p) < keylen) {\n goto done;\n }\n const uint8_t *key = p;\n p += keylen;\n /////////////////////\n // val\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > SIZE_MAX) {\n goto done;\n }\n p += n;\n size_t vallen = x;\n if ((size_t)(e-p) < vallen) {\n goto done;\n }\n const uint8_t *val = p;\n p += vallen;\n /////////////////////\n // ttl\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || (int64_t)x < 0) {\n goto done;\n }\n int64_t ttl = x;\n p += n;\n /////////////////////\n // flags\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0 || x > UINT32_MAX) {\n goto done;\n }\n uint32_t flags = x;\n p += n;\n /////////////////////\n // cas\n n = varint_read_u64(p, e-p, &x);\n if (n <= 0) {\n goto done;\n }\n uint64_t cas = x;\n p += n;\n if (ttl > 0) {\n int64_t unixexpires = int64_add_clamp(unixtime, ttl);\n if (unixexpires < unixnow) {\n // already expired, skip this entry\n ctx->nexpired++;\n continue;\n }\n ttl = unixexpires-unixnow;\n }\n struct pogocache_store_opts opts = {\n .flags = flags,\n .time = now,\n .ttl = ttl,\n .cas = cas,\n };\n // printf(\"[%.*s]=[%.*s]\\n\", (int)keylen, key, (int)vallen, val);\n int ret = pogocache_store(cache, key, keylen, val, vallen, &opts);\n (void)ret;\n assert(ret == POGOCACHE_INSERTED || ret == POGOCACHE_REPLACED);\n ctx->ninserted++;\n }\n ok = true;\ndone:\n buf_clear(&block->cdata);\n xfree(ddata);\n if (!ok) {\n printf(\". 
bad block\\n\");\n }\n return ok;\n}\n\nstatic void *thload(void *arg) {\n struct loadctx *ctx = arg;\n pthread_mutex_lock(ctx->lock);\n while (1) {\n if (*ctx->failure) {\n break;\n }\n if (*ctx->nblocks > 0) {\n // Take a block for processing\n struct cblock block = ctx->blocks[(*ctx->nblocks)-1];\n (*ctx->nblocks)--;\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n ctx->ok = load_block(&block, ctx);\n pthread_mutex_lock(ctx->lock);\n if (!ctx->ok) {\n *ctx->failure = true;\n break;\n }\n // next block\n continue;\n }\n if (*ctx->donereading) {\n break;\n }\n pthread_cond_wait(ctx->cond, ctx->lock);\n }\n pthread_mutex_unlock(ctx->lock);\n pthread_cond_broadcast(ctx->cond); // notify reader thread\n if (!ctx->ok) {\n ctx->errnum = errno;\n }\n return 0;\n}\n\n// load data into cache from path\nint load(const char *path, bool fast, struct load_stats *stats) {\n // Use a single stream reader. Handing off blocks to threads.\n struct load_stats sstats;\n if (!stats) {\n stats = &sstats;\n }\n memset(stats, 0, sizeof(struct load_stats));\n\n int fd = open(path, O_RDONLY);\n if (fd == -1) {\n return -1;\n }\n\n pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;\n pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n bool donereading = false;\n bool failure = false;\n\n int nprocs = fast ? 
sys_nprocs() : 1;\n struct loadctx *ctxs = xmalloc(nprocs*sizeof(struct loadctx));\n memset(ctxs, 0, nprocs*sizeof(struct loadctx));\n int nblocks = 0;\n struct cblock *blocks = xmalloc(sizeof(struct cblock)*nprocs);\n memset(blocks, 0, sizeof(struct cblock)*nprocs);\n int therrnum = 0;\n bool ok = true;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n ctx->lock = &lock;\n ctx->cond = &cond;\n ctx->donereading = &donereading;\n ctx->nblocks = &nblocks;\n ctx->failure = &failure;\n ctx->blocks = blocks;\n atomic_init(&ctx->ok, true);\n if (pthread_create(&ctx->th, 0, thload, ctx) == -1) {\n ctx->th = 0;\n ok = false;\n if (therrnum == 0) {\n therrnum = errno;\n }\n }\n }\n if (!ok) {\n // there was an error creating a thread. \n // At this point there may be some orphaned threads waiting on \n // a condition variable. \n goto shutdown_threads;\n }\n\n // Read the blocks from file, one at a time, handing putting blocks into\n // the 'blocks' queue. The running threads will pick these up and \n // process them in no specific order.\n struct buf cdata = { 0 };\n bool shortread = false;\n while (ok) {\n uint8_t head[16];\n ssize_t size = read(fd, head, 16);\n if (size <= 0) {\n if (size == -1) {\n ok = false;\n }\n break;\n }\n if (size < 16) {\n printf(\". bad head size\\n\");\n ok = false;\n break;\n }\n if (memcmp(head, \"POGO\", 4) != 0) {\n printf(\". missing 'POGO'\\n\");\n ok = false;\n break;\n }\n uint32_t crc;\n memcpy(&crc, head+4, 4);\n size_t dlen = read_u32(head+8);\n size_t clen = read_u32(head+12);\n buf_ensure(&cdata, clen);\n bool okread = true;\n size_t total = 0;\n while (total < clen) {\n ssize_t rlen = read(fd, cdata.data+total, clen-total);\n if (rlen <= 0) {\n shortread = true;\n okread = false;\n break;\n }\n total += rlen;\n }\n if (!okread) {\n if (shortread) {\n printf(\". 
shortread\\n\");\n }\n ok = false;\n break;\n }\n cdata.len = clen;\n stats->csize += clen;\n stats->dsize += dlen;\n uint32_t crc2 = crc32(cdata.data, clen);\n if (crc2 != crc) {\n printf(\". bad crc\\n\");\n ok = false;\n goto bdone;\n }\n // We have a good block. Push it into the queue\n pthread_mutex_lock(&lock);\n while (1) {\n if (failure) {\n // A major error occured, stop reading now\n ok = false;\n break;\n }\n if (nblocks == nprocs) {\n // Queue is currently filled up.\n // Wait and try again.\n pthread_cond_wait(&cond, &lock);\n continue;\n }\n // Add block to queue\n blocks[nblocks++] = (struct cblock){ \n .cdata = cdata,\n .dlen = dlen,\n };\n memset(&cdata, 0, sizeof(struct buf));\n pthread_cond_broadcast(&cond);\n break;\n }\n pthread_mutex_unlock(&lock);\n }\nbdone:\n buf_clear(&cdata);\n\n\nshutdown_threads:\n // Stop all threads\n pthread_mutex_lock(&lock);\n donereading = true;\n pthread_mutex_unlock(&lock);\n pthread_cond_broadcast(&cond);\n\n // Wait for threads to finish\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n stats->nexpired += ctx->nexpired;\n stats->ninserted += ctx->ninserted;\n }\n }\n\n // Get the current error, if any\n errno = 0;\n ok = ok && !failure;\n if (!ok) {\n errno = therrnum;\n for (int i = 0; i < nprocs; i++) {\n struct loadctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n if (!ctx->ok) {\n errno = ctx->errnum;\n break;\n }\n }\n }\n }\n\n // Free all resources.\n for (int i = 0; i < nblocks; i++) {\n buf_clear(&blocks[i].cdata);\n }\n xfree(blocks);\n xfree(ctxs);\n close(fd);\n return ok ? 
0 : -1;\n}\n\n// removes all work files and checks that the current directory is valid.\nbool cleanwork(const char *persist) {\n if (*persist == '\\0') {\n return false;\n }\n bool ok = false;\n char *path = xmalloc(strlen(persist)+1);\n strcpy(path, persist);\n char *dirpath = dirname(path);\n DIR *dir = opendir(dirpath);\n if (!dir) {\n perror(\"# opendir\");\n goto done;\n }\n struct dirent *entry;\n while ((entry = readdir(dir))) {\n if (entry->d_type != DT_REG) {\n continue;\n }\n const char *ext = \".pogocache.work\";\n if (strlen(entry->d_name) < strlen(ext) ||\n strcmp(entry->d_name+strlen(entry->d_name)-strlen(ext), ext) != 0)\n {\n continue;\n }\n size_t filepathcap = strlen(dirpath)+1+strlen(entry->d_name)+1;\n char *filepath = xmalloc(filepathcap);\n snprintf(filepath, filepathcap, \"%s/%s\", dirpath, entry->d_name);\n if (unlink(filepath) == 0) {\n printf(\"# deleted work file %s\\n\", filepath);\n } else {\n perror(\"# unlink\");\n }\n xfree(filepath);\n }\n ok = true;\ndone:\n if (dir) {\n closedir(dir);\n }\n xfree(path);\n return ok;\n}\n"], ["/pogocache/src/postgres.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit postgres.c provides the parser for the Postgres wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n\n// #define PGDEBUG\n\n#define TEXTOID 25\n#define BYTEAOID 17\n\nextern const char *version;\nextern const char *auth;\n\n#ifdef PGDEBUG\n#define dprintf printf\n#else\n#define dprintf(...)\n#endif\n\nstatic void print_packet(const char *data, size_t len) {\n dprintf(\". PACKET=%03zu [ \", len);\n for (size_t i = 0; i < len; i++) {\n printf(\"%02X \", (unsigned char)data[i]);\n }\n dprintf(\"]\\n\");\n dprintf(\". [\");\n for (size_t i = 0; i < len; i++) {\n unsigned char ch = data[i];\n if (ch < ' ') {\n ch = '?';\n }\n dprintf(\"%c\", ch);\n }\n dprintf(\"]\\n\");\n}\n\nstatic int32_t read_i32(const char *data) {\n return ((uint32_t)(uint8_t)data[0] << 24) |\n ((uint32_t)(uint8_t)data[1] << 16) |\n ((uint32_t)(uint8_t)data[2] << 8) |\n ((uint32_t)(uint8_t)data[3] << 0);\n}\n\nstatic void write_i32(char *data, int32_t x) {\n data[0] = (uint8_t)(((uint32_t)x) >> 24) & 0xFF;\n data[1] = (uint8_t)(((uint32_t)x) >> 16) & 0xFF;\n data[2] = (uint8_t)(((uint32_t)x) >> 8) & 0xFF;\n data[3] = (uint8_t)(((uint32_t)x) >> 0) & 0xFF;\n}\n\nstatic int16_t read_i16(const char *data) {\n return ((uint16_t)(uint8_t)data[0] << 8) |\n ((uint16_t)(uint8_t)data[1] << 0);\n}\nstatic void write_i16(char *data, int16_t x) {\n data[0] = (uint8_t)(((uint16_t)x) >> 8) & 0xFF;\n data[1] = (uint8_t)(((uint16_t)x) >> 0) & 0xFF;\n}\n\n// parse_begin is called to begin parsing a client message.\n#define parse_begin() \\\n const char *p = data; 
\\\n const char *e = p+len; \\\n (void)args, (void)pg, (void)e;\n\n// parse_end is called when parsing client message is complete.\n// This will check that the position of the client stream matches the\n// expected lenght provided by the client. \n#define parse_end() \\\n if ((size_t)(p-data) != len) { \\\n return -1; \\\n }\n\n#define parse_cstr() ({ \\\n const char *cstr = 0; \\\n const char *s = p; \\\n while (p < e) { \\\n if (*p == '\\0') { \\\n cstr = s; \\\n p++; \\\n break; \\\n } \\\n p++; \\\n } \\\n if (!cstr) { \\\n return -1; \\\n } \\\n cstr; \\\n}) \n\n#define parse_int16() ({ \\\n if (e-p < 2) { \\\n return -1; \\\n } \\\n int16_t x = read_i16(p); \\\n p += 2; \\\n x; \\\n})\n\n#define parse_byte() ({ \\\n if (e-p < 1) { \\\n return -1; \\\n } \\\n uint8_t x = *p; \\\n p += 1; \\\n x; \\\n})\n\n#define parse_int32() ({ \\\n if (e-p < 4) { \\\n return -1; \\\n } \\\n int32_t x = read_i32(p); \\\n p += 4; \\\n x; \\\n})\n\n#define parse_bytes(n) ({ \\\n if (e-p < n) { \\\n return -1; \\\n } \\\n const void *s = p; \\\n p += (n); \\\n s; \\\n})\n\nstatic void arg_append_unescape_simplestr(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n char *str2 = xmalloc(slen+1);\n for (size_t i = 0; i < str2len; i++) {\n if (str[i] == '\\'' && str[i+1] == '\\'') {\n i++;\n }\n str2[str2len++] = str[i];\n }\n args_append(args, str2, str2len, false);\n xfree(str2);\n}\n\nstatic void pg_statement_free(struct pg_statement *statement) {\n args_free(&statement->args);\n buf_clear(&statement->argtypes);\n}\n\n\nstatic void pg_portal_free(struct pg_portal *portal) {\n args_free(&portal->params);\n}\n\nstatic void statments_free(struct hashmap *map) {\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n pg_statement_free(&statement);\n }\n hashmap_free(map);\n}\n\nstatic void portals_free(struct hashmap *map) 
{\n if (!map) {\n return;\n }\n size_t i = 0;\n void *item;\n while (hashmap_iter(map, &i, &item)) {\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n pg_portal_free(&portal);\n }\n hashmap_free(map);\n}\n\nstruct pg *pg_new(void) {\n struct pg *pg = xmalloc(sizeof(struct pg));\n memset(pg, 0, sizeof(struct pg));\n pg->oid = TEXTOID;\n return pg;\n}\n\nvoid pg_free(struct pg *pg) {\n if (!pg) {\n return;\n }\n xfree(pg->application_name);\n xfree(pg->database);\n xfree(pg->user);\n buf_clear(&pg->buf);\n statments_free(pg->statements);\n portals_free(pg->portals);\n args_free(&pg->targs);\n // args_free(&pg->xargs);\n xfree(pg->desc);\n xfree(pg);\n}\n\nstatic uint64_t pg_statement_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_statement statement;\n memcpy(&statement, item, sizeof(struct pg_statement));\n return hashmap_murmur(statement.name, strlen(statement.name), seed0, seed1);\n}\n\nstatic uint64_t pg_portal_hash(const void *item, uint64_t seed0, \n uint64_t seed1)\n{\n struct pg_portal portal;\n memcpy(&portal, item, sizeof(struct pg_portal));\n return hashmap_murmur(portal.name, strlen(portal.name), seed0, seed1);\n}\n\nstatic int pg_statement_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_statement stmta;\n memcpy(&stmta, a, sizeof(struct pg_statement));\n struct pg_statement stmtb;\n memcpy(&stmtb, b, sizeof(struct pg_statement));\n return strcmp(stmta.name, stmtb.name);\n}\n\nstatic int pg_portal_compare(const void *a, const void *b, void *udata) {\n (void)udata;\n struct pg_portal portala;\n memcpy(&portala, a, sizeof(struct pg_portal));\n struct pg_portal portalb;\n memcpy(&portalb, b, sizeof(struct pg_portal));\n return strcmp(portala.name, portalb.name);\n}\n\nstatic void portal_insert(struct pg *pg, struct pg_portal *portal) {\n (void)portal;\n if (!pg->portals) {\n pg->portals = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_portal), 0, 
0, 0, pg_portal_hash, \n pg_portal_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->portals, portal);\n if (ptr) {\n struct pg_portal old;\n memcpy(&old, ptr, sizeof(struct pg_portal));\n pg_portal_free(&old);\n }\n}\n\nstatic void statement_insert(struct pg *pg, struct pg_statement *stmt) {\n if (!pg->statements) {\n pg->statements = hashmap_new_with_allocator(xmalloc, xrealloc, xfree, \n sizeof(struct pg_statement), 0, 0, 0, pg_statement_hash, \n pg_statement_compare, 0, 0);\n }\n const void *ptr = hashmap_set(pg->statements, stmt);\n if (ptr) {\n struct pg_statement old;\n memcpy(&old, ptr, sizeof(struct pg_statement));\n pg_statement_free(&old);\n }\n}\n\nstatic bool statement_get(struct pg *pg, const char *name, \n struct pg_statement *stmt)\n{\n if (!pg->statements) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_statement key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->statements, &key);\n if (!ptr) {\n return false;\n }\n memcpy(stmt, ptr, sizeof(struct pg_statement));\n return true;\n}\n\nstatic bool portal_get(struct pg *pg, const char *name, \n struct pg_portal *portal)\n{\n if (!pg->portals) {\n return false;\n }\n size_t namelen = strlen(name);\n if (namelen >= PGNAMEDATALEN) {\n return false;\n }\n struct pg_portal key = { 0 };\n strcpy(key.name, name);\n const void *ptr = hashmap_get(pg->portals, &key);\n if (!ptr) {\n return false;\n }\n memcpy(portal, ptr, sizeof(struct pg_portal));\n return true;\n}\n\nstatic const uint8_t hextoks[256] = { \n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,\n 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n};\n\nstatic uint32_t decode_hex(const uint8_t *str) {\n return (((int)hextoks[str[0]])<<12) | (((int)hextoks[str[1]])<<8) |\n 
(((int)hextoks[str[2]])<<4) | (((int)hextoks[str[3]])<<0);\n}\n\nstatic bool is_surrogate(uint32_t cp) {\n return cp > 55296 && cp < 57344;\n}\n\nstatic uint32_t decode_codepoint(uint32_t cp1, uint32_t cp2) {\n return cp1 > 55296 && cp1 < 56320 && cp2 > 56320 && cp2 < 57344 ?\n ((cp1 - 55296) << 10) | ((cp2 - 56320) + 65536) :\n 65533;\n}\n\nstatic inline int encode_codepoint(uint8_t dst[], uint32_t cp) {\n if (cp < 128) {\n dst[0] = cp;\n return 1;\n } else if (cp < 2048) {\n dst[0] = 192 | (cp >> 6);\n dst[1] = 128 | (cp & 63);\n return 2;\n } else if (cp > 1114111 || is_surrogate(cp)) {\n cp = 65533; // error codepoint\n }\n if (cp < 65536) {\n dst[0] = 224 | (cp >> 12);\n dst[1] = 128 | ((cp >> 6) & 63);\n dst[2] = 128 | (cp & 63);\n return 3;\n }\n dst[0] = 240 | (cp >> 18);\n dst[1] = 128 | ((cp >> 12) & 63);\n dst[2] = 128 | ((cp >> 6) & 63);\n dst[3] = 128 | (cp & 63);\n return 4;\n}\n\n// for_each_utf8 iterates over each UTF-8 bytes in jstr, unescaping along the\n// way. 'f' is a loop expression that will make available the 'ch' char which \n// is just a single byte in a UTF-8 series.\n// this is taken from https://github.com/tidwall/json.c\n#define for_each_utf8(jstr, len, f) { \\\n size_t nn = (len); \\\n int ch = 0; \\\n (void)ch; \\\n for (size_t ii = 0; ii < nn; ii++) { \\\n if ((jstr)[ii] != '\\\\') { \\\n ch = (jstr)[ii]; \\\n if (1) f \\\n continue; \\\n }; \\\n ii++; \\\n if (ii == nn) break; \\\n switch ((jstr)[ii]) { \\\n case '\\\\': ch = '\\\\'; break; \\\n case '/' : ch = '/'; break; \\\n case 'b' : ch = '\\b'; break; \\\n case 'f' : ch = '\\f'; break; \\\n case 'n' : ch = '\\n'; break; \\\n case 'r' : ch = '\\r'; break; \\\n case 't' : ch = '\\t'; break; \\\n case '\"' : ch = '\"'; break; \\\n case 'u' : \\\n if (ii+5 > nn) { nn = 0; continue; }; \\\n uint32_t cp = decode_hex((jstr)+ii+1); \\\n ii += 5; \\\n if (is_surrogate(cp)) { \\\n if (nn-ii >= 6 && (jstr)[ii] == '\\\\' && (jstr)[ii+1] == 'u') { \\\n cp = decode_codepoint(cp, 
decode_hex((jstr)+ii+2)); \\\n ii += 6; \\\n } \\\n } \\\n uint8_t _bytes[4]; \\\n int _n = encode_codepoint(_bytes, cp); \\\n for (int _j = 0; _j < _n; _j++) { \\\n ch = _bytes[_j]; \\\n if (1) f \\\n } \\\n ii--; \\\n continue; \\\n default: \\\n continue; \\\n }; \\\n if (1) f \\\n } \\\n}\n\nstatic void arg_append_unescape_str(struct args *args, const char *str,\n size_t slen)\n{\n size_t str2len = 0;\n uint8_t *str2 = xmalloc(slen+1);\n for_each_utf8((uint8_t*)str, slen, {\n str2[str2len++] = ch;\n });\n args_append(args, (char*)str2, str2len, false);\n xfree(str2);\n}\n\n// Very simple map to stores all params numbers.\nstruct pmap {\n int count;\n int nbuckets;\n uint16_t *buckets;\n uint16_t def[8];\n};\n\nstatic void pmap_init(struct pmap *map) {\n memset(map, 0, sizeof(struct pmap));\n map->nbuckets = sizeof(map->def)/sizeof(uint16_t);\n map->buckets = map->def;\n}\n\nstatic void pmap_free(struct pmap *map) {\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n}\n\nstatic void pmap_insert0(uint16_t *buckets, int nbuckets, uint16_t param) {\n uint16_t hash = mix13(param);\n int i = hash%nbuckets;\n while (1) {\n if (buckets[i] == 0) {\n buckets[i] = param;\n return;\n }\n i = (i+1)%nbuckets;\n }\n}\n\nstatic void pmap_grow(struct pmap *map) {\n int nbuckets2 = map->nbuckets*2;\n uint16_t *buckets2 = xmalloc(nbuckets2*sizeof(uint16_t));\n memset(buckets2, 0, nbuckets2*sizeof(uint16_t));\n for (int i = 0; i < map->nbuckets; i++) {\n if (map->buckets[i]) {\n pmap_insert0(buckets2, nbuckets2, map->buckets[i]);\n }\n }\n if (map->buckets != map->def) {\n xfree(map->buckets);\n }\n map->buckets = buckets2;\n map->nbuckets = nbuckets2;\n}\n\nstatic void pmap_insert(struct pmap *map, uint16_t param) {\n assert(param != 0);\n if (map->count == (map->nbuckets>>1)+(map->nbuckets>>2)) {\n pmap_grow(map);\n }\n pmap_insert0(map->buckets, map->nbuckets, param);\n map->count++;\n}\n\nstatic bool pmap_exists(struct pmap *map, uint16_t param) {\n uint16_t hash = 
mix13(param);\n int i = hash%map->nbuckets;\n while (1) {\n if (map->buckets[i] == 0) {\n return false;\n }\n if (map->buckets[i] == param) {\n return true;\n }\n i = (i+1)%map->nbuckets;\n }\n}\n\nstatic bool parse_query_args(const char *query, struct args *args, \n int *nparams, struct buf *argtypes)\n{\n dprintf(\"parse_query: [%s]\\n\", query);\n struct pmap pmap;\n pmap_init(&pmap);\n\n // loop through each keyword\n while (isspace(*query)) {\n query++;\n }\n bool ok = false;\n bool esc = false;\n const char *str;\n const char *p = query;\n bool join = false;\n while (*p) {\n switch (*p) {\n case ';':\n goto break_while;\n case '\\\"':\n // identifier\n parse_errorf(\"idenifiers not allowed\");\n goto done;\n case '\\'':\n // simple string\n p++;\n str = p;\n esc = false;\n while (*p) {\n if (*p == '\\'') {\n if (*(p+1) == '\\'') {\n esc = true;\n p += 2;\n continue;\n }\n break;\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_simplestr(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n case '$':\n // dollar-quote or possible param\n if (*(p+1) >= '0' && *(p+1) <= '9') {\n char *e = 0;\n long param = strtol(p+1, &e, 10);\n if (param == 0 || param > 0xFFFF) {\n parse_errorf(\"there is no parameter $%ld\", param);\n goto done;\n }\n pmap_insert(&pmap, param);\n args_append(args, p, e-p, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'P'+join);\n join = *e && !isspace(*e);\n }\n p = e;\n continue;\n }\n // dollar-quote strings not\n parse_errorf(\"dollar-quote strings not allowed\");\n goto done;\n case 'E': case 'e':\n if (*(p+1) == '\\'') {\n // escaped string\n p += 2;\n str = p;\n while (*p) {\n if (*p == '\\\\') {\n esc = true;\n } else if (*p == '\\'') {\n size_t x = 0;\n while (*(p-x-1) == '\\\\') {\n x++;\n }\n 
if ((x%2)==0) {\n break;\n }\n }\n p++;\n }\n if (*p != '\\'') {\n parse_errorf(\"unterminated quoted string\");\n goto done;\n }\n size_t slen = p-str;\n if (!esc) {\n args_append(args, str, slen, true);\n } else {\n arg_append_unescape_str(args, str, slen);\n }\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *(p+1) && !isspace(*(p+1));\n }\n break;\n }\n // fallthrough\n default:\n if (isspace(*p)) {\n p++;\n continue;\n }\n // keyword\n const char *keyword = p;\n while (*p && !isspace(*p)) {\n if (*p == ';' || *p == '\\'' || *p == '\\\"' || *p == '$') {\n break;\n }\n p++;\n }\n size_t keywordlen = p-keyword;\n args_append(args, keyword, keywordlen, true);\n if (argtypes) {\n buf_append_byte(argtypes, 'A'+join);\n join = *p && !isspace(*p);\n }\n while (isspace(*p)) {\n p++;\n }\n continue;\n }\n p++;\n }\nbreak_while:\n while (*p) {\n if (*p != ';') {\n parse_errorf(\"unexpected characters at end of query\");\n goto done;\n }\n p++;\n }\n ok = true;\ndone:\n if (ok) {\n // check params\n for (int i = 0; i < pmap.count; i++) {\n if (!pmap_exists(&pmap, i+1)) {\n parse_errorf(\"missing parameter $%d\", i+1);\n ok = false;\n break;\n }\n }\n }\n *nparams = pmap.count;\n pmap_free(&pmap);\n if (argtypes) {\n buf_append_byte(argtypes, '\\0');\n }\n return ok;\n}\n\nstatic bool parse_cache_query_args(const char *query, struct args *args,\n int *maxparam, struct buf *argtypes)\n{\n while (isspace(*query)) {\n query++;\n }\n if (!parse_query_args(query, args, maxparam, argtypes)) {\n return false;\n }\n#ifdef PGDEBUG\n args_print(args);\n#endif\n if (argtypes) {\n dprintf(\"argtypes: [%s]\\n\", argtypes->data);\n }\n return true;\n}\n\nstatic size_t parseQ(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Query\n dprintf(\">>> Query\\n\");\n parse_begin();\n const char *query = parse_cstr();\n parse_end();\n int nparams = 0;\n bool pok = parse_cache_query_args(query, args, &nparams, 0);\n if (!pok) {\n pg->error = 1;\n 
args_clear(args);\n return len;\n }\n if (nparams > 0) {\n parse_seterror(\"query cannot have parameters\");\n pg->error = 1;\n args_clear(args);\n return len;\n }\n if (args->len == 0) {\n pg->empty_query = 1;\n }\n return len;\n}\n\nstatic size_t parseP(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Parse\n dprintf(\"<<< Parse\\n\");\n // print_packet(data, len);\n parse_begin();\n const char *stmt_name = parse_cstr();\n const char *query = parse_cstr();\n uint16_t num_param_types = parse_int16();\n // dprintf(\". Parse [%s] [%s] [%d]\\n\", stmt_name, query,\n // (int)num_param_types);\n for (uint16_t i = 0; i < num_param_types; i++) {\n int32_t param_type = parse_int32();\n (void)param_type;\n // dprintf(\". [%d]\\n\", param_type);\n }\n parse_end();\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n int nparams = 0;\n struct buf argtypes = { 0 };\n bool ok = parse_cache_query_args(query, args, &nparams, &argtypes);\n if (!ok) {\n pg->error = 1;\n args_clear(args);\n buf_clear(&argtypes);\n return len;\n }\n // copy over last statement\n struct pg_statement stmt = { 0 };\n strcpy(stmt.name, stmt_name);\n stmt.nparams = nparams;\n // copy over parsed args\n for (size_t i = 0; i < args->len; i++) {\n args_append(&stmt.args, args->bufs[i].data, args->bufs[i].len, false);\n }\n args_clear(args);\n stmt.argtypes = argtypes;\n statement_insert(pg, &stmt);\n pg->parse = 1;\n return len;\n}\n\nstatic size_t parseD(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // Describe\n dprintf(\"<<< Describe\\n\");\n if (pg->describe) {\n // Already has a describe in a sequence\n pg->error = 1;\n parse_errorf(\"double describe not allowed\");\n return -1;\n }\n // print_packet(data, len);\n parse_begin();\n uint8_t type = parse_byte();\n const char *name = parse_cstr();\n parse_end();\n\n dprintf(\". 
Describe [%c] [%s]\\n\", type, name);\n if (type == 'P' || type == 'P'+1) {\n struct pg_portal portal;\n if (!portal_get(pg, name, &portal)) {\n parse_errorf(\"portal not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('T')\n // Int32 length\n // Int16 field_count\n // Field[] fields\n // all fields are unnamed text\n char field[] = { \n 0x00, // \"\\0\" (field name)\n 0x00, 0x00, 0x00, 0x00, // table_oid = 0\n 0x00, 0x00, // column_attr_no = 0\n 0x00, 0x00, 0x00, pg->oid, // type_oid = 25 (text)\n 0xFF, 0xFF, // type_size = -1\n 0xFF, 0xFF, 0xFF, 0xFF, // type_modifier = -1\n 0x00, 0x00, // format_code = 0 (text)\n };\n static_assert(sizeof(field) == 19, \"\");\n size_t size = 1+4+2+portal.params.len*sizeof(field);\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 'T';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, portal.params.len);\n p1 += 2;\n for (size_t i = 0; i < portal.params.len; i++) {\n memcpy(p1, field, sizeof(field));\n p1 += sizeof(field);\n }\n pg->desclen = size;\n return len;\n }\n\n if (type == 'S') {\n struct pg_statement stmt;\n if (!statement_get(pg, name, &stmt)) {\n parse_errorf(\"statement not found\");\n pg->error = 1;\n return len;\n }\n // Byte1('t')\n // Int32 length\n // Int16 num_params\n // Int32[] param_type_oids\n size_t size = 1+4+2+stmt.nparams*4;\n if (pg->desc) {\n xfree(pg->desc);\n }\n pg->desc = xmalloc(size);\n memset(pg->desc, 0, size);\n char *p1 = pg->desc;\n *(p1++) = 't';\n write_i32(p1, size-1);\n p1 += 4;\n write_i16(p1, stmt.nparams);\n p1 += 2;\n for (int i = 0; i < stmt.nparams; i++) {\n write_i32(p1, pg->oid);\n p1 += 4;\n }\n pg->desclen = size;\n pg->describe = 1;\n return len;\n }\n parse_errorf(\"unsupported describe type '%c'\", type);\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseB(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n\n // Bind\n dprintf(\"<<< 
Bind\\n\");\n\n // print_packet(data, len);\n\n // X Byte1('B') # Bind message identifier\n // X Int32 length # Message length including self\n //\n // String portal_name # Destination portal (\"\" = unnamed)\n // String statement_name # Prepared statement name (from Parse)\n // Int16 num_format_codes # 0 = all text, 1 = one for all, or N\n // [Int16] format_codes # 0 = text, 1 = binary\n // Int16 num_parameters\n // [parameter values]\n // Int16 num_result_formats\n // [Int16] result_format_codes\n\n parse_begin();\n const char *portal_name = parse_cstr();\n const char *stmt_name = parse_cstr();\n int num_formats = parse_int16();\n for (int i = 0; i < num_formats; i++) {\n int format = parse_int16();\n if (format != 0 && format != 1) {\n parse_errorf(\"only text or binary format allowed\");\n pg->error = 1;\n return len;\n }\n }\n uint16_t num_params = parse_int16();\n args_clear(&pg->targs);\n for (int i = 0; i < num_params; i++) {\n int32_t len = parse_int32();\n if (len <= 0) {\n // Nulls are empty strings\n len = 0;\n }\n const char *b = parse_bytes(len);\n args_append(&pg->targs, b, len, false);\n }\n // ignore result formats\n uint16_t num_result_formats = parse_int16();\n for (int i = 0; i < num_result_formats; i++) {\n int result_format_codes = parse_int16();\n (void)result_format_codes;\n }\n parse_end();\n\n if (strlen(portal_name) >= PGNAMEDATALEN) {\n parse_seterror(\"portal name too large\");\n pg->error = 1;\n return len;\n }\n if (strlen(stmt_name) >= PGNAMEDATALEN) {\n parse_seterror(\"statement name too large\");\n pg->error = 1;\n return len;\n }\n struct pg_portal portal = { 0 };\n strcpy(portal.name, portal_name);\n strcpy(portal.stmt, stmt_name);\n memcpy(&portal.params, &pg->targs, sizeof(struct args));\n memset(&pg->targs, 0, sizeof(struct args));\n portal_insert(pg, &portal);\n pg->bind = 1;\n return len;\n}\n\nstatic size_t parseX(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Close\n 
dprintf(\"<<< Close\\n\");\n parse_begin();\n parse_end();\n pg->close = 1;\n return len;\n}\n\nstatic size_t parseE(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args, (void)pg;\n // Execute\n dprintf(\"<<< Execute\\n\");\n parse_begin();\n const char *portal_name = parse_cstr();\n size_t max_rows = parse_int32();\n parse_end();\n struct pg_portal portal;\n if (!portal_get(pg, portal_name, &portal)) {\n parse_seterror(\"portal not found\");\n pg->error = 1;\n return len;\n }\n struct pg_statement stmt;\n if (!statement_get(pg, portal.stmt, &stmt)) {\n parse_seterror(\"statement not found\");\n pg->error = 1;\n return len;\n }\n if ((size_t)stmt.nparams != portal.params.len) {\n parse_seterror(\"portal params mismatch\");\n pg->error = 1;\n return len;\n }\n // ignore max_rows\n (void)max_rows;\n\n // \n args_clear(&pg->targs);\n for (size_t i = 0; i < stmt.args.len; i++) {\n const char *arg = stmt.args.bufs[i].data;\n size_t arglen = stmt.args.bufs[i].len;\n char atype = stmt.argtypes.data[i];\n dprintf(\"[%.*s] [%c]\\n\", (int)arglen, arg, atype);\n bool join = false;\n switch (atype) {\n case 'A'+1:\n atype = 'A';\n join = true;\n break;\n case 'P':\n join = false;\n break;\n case 'P'+1:\n atype = 'P';\n join = true;\n break;\n }\n if (atype == 'P') {\n if (arglen == 0 || arg[0] != '$') {\n goto internal_error;\n }\n uint64_t x;\n bool ok = parse_u64(arg+1, arglen-1, &x);\n if (!ok || x == 0 || x > 0xFFFF) {\n goto internal_error;\n }\n size_t paramidx = x-1;\n if (paramidx >= portal.params.len) {\n goto internal_error;\n }\n arg = portal.params.bufs[paramidx].data;\n arglen = portal.params.bufs[paramidx].len;\n }\n if (join) {\n assert(pg->targs.len > 0);\n buf_append(&pg->targs.bufs[pg->targs.len-1], arg, arglen);\n } else {\n args_append(&pg->targs, arg, arglen, false);\n }\n }\n\n struct args swapargs = *args;\n *args = pg->targs;\n pg->targs = swapargs;\n\n#ifdef PGDEBUG\n args_print(args);\n#endif\n\n pg->execute = 1;\n 
return len;\ninternal_error:\n parse_seterror(\"portal params internal error\");\n pg->error = 1;\n return len;\n}\n\nstatic size_t parseS(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n (void)args;\n // Sync\n dprintf(\"<<< Sync\\n\");\n // print_packet(data, len);\n parse_begin();\n parse_end();\n pg->sync = 1;\n return len;\n}\n\nstatic size_t parsep(const char *data, size_t len, struct args *args, \n struct pg *pg)\n{\n // PasswordMessage\n parse_begin();\n const char *password = parse_cstr();\n parse_end();\n if (strcmp(password, auth) != 0) {\n parse_seterror(\n \"WRONGPASS invalid username-password pair or user is disabled.\");\n return -1;\n }\n pg->auth = 1;\n return len;\n}\n\nstatic ssize_t parse_message(const char *data, size_t len, struct args *args,\n struct pg *pg)\n{\n if (len < 5) {\n return 0;\n }\n int msgbyte = data[0];\n size_t msglen = read_i32(data+1);\n if (len < msglen+1) {\n return 0;\n }\n msglen -= 4;\n data += 5;\n ssize_t ret;\n switch (msgbyte) {\n case 'Q':\n ret = parseQ(data, msglen, args, pg);\n break;\n case 'P':\n ret = parseP(data, msglen, args, pg);\n break;\n case 'X':\n ret = parseX(data, msglen, args, pg);\n break;\n case 'E':\n ret = parseE(data, msglen, args, pg);\n break;\n case 'p': // lowercase\n ret = parsep(data, msglen, args, pg);\n break;\n case 'D':\n ret = parseD(data, msglen, args, pg);\n break;\n case 'B':\n ret = parseB(data, msglen, args, pg);\n break;\n case 'S':\n ret = parseS(data, msglen, args, pg);\n break;\n default:\n pg->error = 1;\n parse_errorf(\"unknown message '%c'\", msgbyte);\n ret = msglen;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+5;\n}\n\nstatic ssize_t parse_magic_ssl(const char *data, size_t len, struct pg *pg) {\n (void)data;\n // SSLRequest\n pg->ssl = 1;\n return len;\n}\n\nstatic ssize_t parse_magic_proto3(const char *data, size_t len, struct pg *pg) {\n // StartupMessage\n const char *p = (void*)data;\n const char *e = 
p+len;\n // Read parameters\n const char *user = \"\";\n const char *database = \"\";\n const char *application_name = \"\";\n const char *client_encoding = \"\";\n const char *name = 0;\n const char *s = (char*)p;\n while (p < e) {\n if (*p == '\\0') {\n if (s != p) {\n if (name) {\n if (strcmp(name, \"database\") == 0) {\n database = s;\n } else if (strcmp(name, \"application_name\") == 0) {\n application_name = s;\n } else if (strcmp(name, \"client_encoding\") == 0) {\n client_encoding = s;\n } else if (strcmp(name, \"user\") == 0) {\n user = s;\n }\n name = 0;\n } else {\n name = s;\n }\n }\n s = p+1;\n }\n p++;\n }\n // dprintf(\". database=%s, application_name=%s, client_encoding=%s, \"\n // \"user=%s\\n\", database, application_name, client_encoding, user);\n if (*client_encoding && strcmp(client_encoding, \"UTF8\") != 0) {\n printf(\"# Invalid Postgres client_encoding (%s)\\n\",\n client_encoding);\n return -1;\n }\n pg->user = xmalloc(strlen(user)+1);\n strcpy((char*)pg->user, user);\n pg->database = xmalloc(strlen(database)+1);\n strcpy((char*)pg->database, database);\n pg->application_name = xmalloc(strlen(application_name)+1);\n strcpy((char*)pg->application_name, application_name);\n pg->startup = 1;\n return p-data;\n}\n\nstatic ssize_t parse_magic_cancel(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n parse_errorf(\"cancel message unsupported\");\n return -1;\n}\n\nstatic ssize_t parse_magic(const char *data, size_t len, struct pg *pg) {\n (void)data; (void)len; (void)pg;\n if (len < 4) {\n return 0;\n }\n size_t msglen = read_i32(data);\n if (msglen > 65536) {\n parse_errorf(\"message too large\");\n return -1;\n }\n if (len < msglen) {\n return 0;\n }\n if (msglen < 8) {\n parse_errorf(\"invalid message\");\n return -1;\n }\n // dprintf(\"parse_magic\\n\");\n uint32_t magic = read_i32(data+4);\n data += 8;\n msglen -= 8;\n ssize_t ret;\n switch (magic) {\n case 0x04D2162F: \n ret = parse_magic_ssl(data, msglen, 
pg);\n break;\n case 0x00030000: \n ret = parse_magic_proto3(data, msglen, pg);\n break;\n case 0xFFFF0000: \n ret = parse_magic_cancel(data, msglen, pg);\n break;\n default:\n parse_errorf(\"Protocol error: unknown magic number %08x\", magic);\n ret = -1;\n }\n if (ret == -1 || (size_t)ret != msglen) {\n return -1;\n }\n return msglen+8;\n}\n\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pgptr)\n{\n (void)print_packet;\n // print_packet(data, len);\n struct pg *pg = *pgptr;\n if (!pg) {\n pg = pg_new();\n *pgptr = pg;\n }\n pg->error = 0;\n if (len == 0) {\n return 0;\n }\n if (data[0] == 0) {\n return parse_magic(data, len, pg);\n }\n return parse_message(data, len, args, pg);\n}\n\nvoid pg_write_auth(struct conn *conn, unsigned char code) {\n unsigned char bytes[] = { \n 'R', 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n}\n\nvoid pg_write_ready(struct conn *conn, unsigned char code) {\n if (!pg_execute(conn)) {\n unsigned char bytes[] = { \n 'Z', 0x0, 0x0, 0x0, 0x5, code,\n };\n conn_write_raw(conn, bytes, sizeof(bytes));\n }\n}\n\nvoid pg_write_status(struct conn *conn, const char *key, const char *val) {\n size_t keylen = strlen(key);\n size_t vallen = strlen(val);\n int32_t size = 4+keylen+1+vallen+1;\n char *bytes = xmalloc(1+size);\n bytes[0] = 'S';\n write_i32(bytes+1, size);\n memcpy(bytes+1+4,key,keylen+1);\n memcpy(bytes+1+4+keylen+1,val,vallen+1);\n conn_write_raw(conn, bytes, 1+size);\n xfree(bytes);\n}\n\nvoid pg_write_row_desc(struct conn *conn, const char **fields, int nfields){\n size_t size = 1+4+2;\n for (int i = 0; i < nfields; i++) {\n size += strlen(fields[i])+1;\n size += 4+2+4+2+4+2;\n }\n int oid = conn_pg(conn)->oid;\n char *bytes = xmalloc(size);\n bytes[0] = 'T';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, nfields); // field_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < nfields; i++) {\n size_t fsize = 
strlen(fields[i]);\n memcpy(p, fields[i], fsize+1);\n p += fsize+1;\n write_i32(p, 0); // table_oid\n p += 4;\n write_i16(p, 0); // column_attr_number\n p += 2;\n write_i32(p, oid); // type_oid\n p += 4;\n write_i16(p, -1); // type_size\n p += 2;\n write_i32(p, -1); // type_modifier\n p += 4;\n write_i16(p, 1); // format_code\n p += 2;\n }\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_row_data(struct conn *conn, const char **cols, \n const size_t *collens, int ncols)\n{\n size_t size = 1+4+2;\n for (int i = 0; i < ncols; i++) {\n size += 4+collens[i];\n }\n char *bytes = xmalloc(size);\n bytes[0] = 'D';\n write_i32(bytes+1, size-1); // message_size\n write_i16(bytes+1+4, ncols); // column_count\n char *p = bytes+1+4+2;\n for (int i = 0; i < ncols; i++) {\n write_i32(p, collens[i]); // column_length\n p += 4;\n#ifdef PGDEBUG\n printf(\" ROW >>>> len:%zu [\", collens[i]);\n binprint(cols[i], collens[i]);\n printf(\"]\\n\");\n#endif\n memcpy(p, cols[i], collens[i]); // column_data\n p += collens[i];\n }\n \n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_complete(struct conn *conn, const char *tag){\n size_t taglen = strlen(tag);\n size_t size = 1+4+taglen+1;\n char *bytes = xmalloc(size);\n bytes[0] = 'C';\n write_i32(bytes+1, size-1); // message_size\n memcpy(bytes+1+4, tag, taglen+1);\n conn_write_raw(conn, bytes, size);\n xfree(bytes);\n}\n\nvoid pg_write_completef(struct conn *conn, const char *tag_format, ...){\n // initializing list pointer\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_complete(conn, tag);\n}\n\nvoid pg_write_simple_row_data_ready(struct conn *conn, const char *desc,\n const void *row, size_t len, const char *tag)\n{\n pg_write_row_desc(conn, (const char*[]){ desc }, 1);\n pg_write_row_data(conn, (const char*[]){ row }, (size_t[]){ len }, 1);\n pg_write_complete(conn, tag);\n pg_write_ready(conn, 
'I');\n}\n\nvoid pg_write_simple_row_str_ready(struct conn *conn, const char *desc,\n const char *row, const char *tag)\n{\n pg_write_simple_row_data_ready(conn, desc, row, strlen(row), tag);\n}\n\nvoid pg_write_simple_row_i64_ready(struct conn *conn, const char *desc,\n int64_t row, const char *tag)\n{\n char val[32];\n snprintf(val, sizeof(val), \"%\" PRIi64, row);\n pg_write_simple_row_str_ready(conn, desc, val, tag);\n}\n\nvoid pg_write_simple_row_str_readyf(struct conn *conn, const char *desc,\n const char *row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_str_ready(conn, desc, row, tag);\n}\n\nvoid pg_write_simple_row_i64_readyf(struct conn *conn, const char *desc,\n int64_t row, const char *tag_format, ...)\n{\n char tag[128];\n va_list ap;\n va_start(ap, tag_format);\n vsnprintf(tag, sizeof(tag)-1, tag_format, ap);\n va_end(ap);\n pg_write_simple_row_i64_ready(conn, desc, row, tag);\n}\n\nstatic void write_auth_ok(struct conn *conn, struct pg *pg) {\n // dprintf(\">> AuthOK\\n\");\n pg_write_auth(conn, 0); // AuthOK;\n // startup message received, respond\n pg_write_status(conn, \"client_encoding\", \"UTF8\");\n pg_write_status(conn, \"server_encoding\", \"UTF8\");\n char status[128];\n snprintf(status, sizeof(status), \"%s (Pogocache)\", version);\n pg_write_status(conn, \"server_version\", status);\n pg_write_ready(conn, 'I'); // Idle;\n pg->ready = 1;\n}\n\n// Respond to various the connection states.\n// Returns true if the all responses complete or false if there was an\n// error.\nbool pg_respond(struct conn *conn, struct pg *pg) {\n if (pg->error) {\n conn_write_error(conn, parse_lasterror());\n return true;\n }\n if (pg->empty_query) {\n dprintf(\"====== pg_respond(pg->empty_query) =====\\n\");\n conn_write_raw(conn, \"I\\0\\0\\0\\4\", 5);\n conn_write_raw(conn, \"Z\\0\\0\\0\\5I\", 6);\n pg->empty_query = 0;\n return 
true;\n }\n if (pg->parse) {\n dprintf(\"====== pg_respond(pg->parse) =====\\n\");\n conn_write_raw(conn, \"1\\0\\0\\0\\4\", 5);\n pg->parse = 0;\n return true;\n }\n if (pg->bind) {\n dprintf(\"====== pg_respond(pg->bind) =====\\n\");\n conn_write_raw(conn, \"2\\0\\0\\0\\4\", 5);\n pg->bind = 0;\n return true;\n }\n if (pg->describe) {\n dprintf(\"====== pg_respond(pg->describe) =====\\n\");\n assert(pg->desc);\n conn_write_raw(conn, pg->desc, pg->desclen);\n xfree(pg->desc);\n pg->desc = 0;\n pg->desclen = 0;\n pg->describe = 0;\n return true;\n }\n if (pg->sync) {\n dprintf(\"====== pg_respond(pg->sync) =====\\n\");\n pg->execute = 0;\n pg_write_ready(conn, 'I');\n pg->sync = 0;\n return true;\n }\n if (pg->close) {\n dprintf(\"====== pg_respond(pg->close) =====\\n\");\n pg->close = 0;\n return false;\n }\n if (pg->ssl == 1) {\n if (!conn_istls(conn)) {\n conn_write_raw_cstr(conn, \"N\");\n } else {\n conn_write_raw_cstr(conn, \"Y\");\n }\n pg->ssl = 0;\n return true;\n }\n if (pg->auth == 1) {\n if (pg->startup == 0) {\n return false;\n }\n conn_setauth(conn, true);\n write_auth_ok(conn, pg);\n pg->auth = 0;\n return true;\n }\n if (pg->startup == 1) {\n if (auth && *auth) {\n pg_write_auth(conn, 3); // AuthenticationCleartextPassword;\n } else {\n write_auth_ok(conn, pg);\n pg->startup = 0;\n }\n return true;\n }\n return true;\n}\n\nvoid pg_write_error(struct conn *conn, const char *msg) {\n size_t msglen = strlen(msg);\n size_t size = 1+4;\n size += 1+5+1; // 'S' \"ERROR\" \\0\n size += 1+5+1; // 'V' \"ERROR\" \\0\n size += 1+5+1; // 'C' \"23505\" \\0\n size += 1+msglen+1; // 'M' msg \\0\n size += 1; // null-terminator\n char *bytes = xmalloc(size);\n bytes[0] = 'E';\n write_i32(bytes+1, size-1);\n char *p = bytes+1+4;\n memcpy(p, \"SERROR\", 7);\n p += 7;\n memcpy(p, \"VERROR\", 7);\n p += 7;\n memcpy(p, \"C23505\", 7);\n p += 7;\n p[0] = 'M';\n p++;\n memcpy(p, msg, msglen+1);\n p += msglen+1;\n p[0] = '\\0';\n conn_write_raw(conn, bytes, size);\n 
xfree(bytes);\n}\n\n// return true if the command need further execution, of false if this\n// operation handled it already\nbool pg_precommand(struct conn *conn, struct args *args, struct pg *pg) {\n#ifdef PGDEBUG\n printf(\"precommand: \");\n args_print(args);\n#endif\n if (args->len > 0 && args->bufs[0].len > 0) {\n char c = tolower(args->bufs[0].data[0]);\n if (c == 'b' || c == 'r' || c == 'c') {\n // silently ignore transaction commands.\n if (c == 'b' && argeq(args, 0, \"begin\")) {\n pg_write_completef(conn, \"BEGIN\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"rollback\")) {\n pg_write_completef(conn, \"ROLLBACK\");\n pg_write_ready(conn, 'I');\n return false;\n }\n if (argeq(args, 0, \"commit\")) {\n pg_write_completef(conn, \"COMMIT\");\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n if (c == ':' && args->bufs[0].len > 1 && args->bufs[0].data[1] == ':') {\n if (argeq(args, 0, \"::bytea\") || argeq(args, 0, \"::bytes\")) {\n pg->oid = BYTEAOID;\n } else if (argeq(args, 0, \"::text\")) {\n pg->oid = TEXTOID;\n } else {\n char err[128];\n snprintf(err, sizeof(err), \"unknown type '%.*s'\", \n (int)(args->bufs[0].len-2), args->bufs[0].data+2);\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n return false;\n }\n args_remove_first(args);\n if (args->len == 0) {\n if (pg->oid == BYTEAOID) {\n pg_write_completef(conn, \"BYTEA\");\n } else {\n pg_write_completef(conn, \"TEXT\");\n }\n pg_write_ready(conn, 'I');\n return false;\n }\n }\n }\n return true;\n}\n"], ["/pogocache/src/cmds.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit cmd.c handles all incoming client commands.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"save.h\"\n#include \"parse.h\"\n#include \"util.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"conn.h\"\n#include \"xmalloc.h\"\n#include \"pogocache.h\"\n#include \"stats.h\"\n\n// from main.c\nextern const uint64_t seed;\nextern const char *path;\nextern const int verb;\nextern const char *auth;\nextern const bool useauth;\nextern const char *persist;\nextern const int nthreads;\nextern const char *version;\nextern const char *githash;\nextern atomic_int_fast64_t flush_delay;\nextern atomic_bool sweep;\nextern atomic_bool lowmem;\nextern const int nshards;\nextern const int narenas;\nextern const int64_t procstart;\nextern const int maxconns;\n\nextern struct pogocache *cache;\n\nstruct set_entry_context {\n bool written;\n struct conn *conn;\n const char *cmdname;\n};\n\nstatic bool set_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)val, (void)vallen,\n (void)expires, (void)flags, (void)cas;\n struct set_entry_context *ctx = udata;\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n pg_write_row_desc(ctx->conn, (const char*[]){ \"value\" }, 1);\n pg_write_row_data(ctx->conn, (const char*[]){ val }, \n (size_t[]){ vallen }, 1);\n pg_write_completef(ctx->conn, \"%s 1\", ctx->cmdname);\n pg_write_ready(ctx->conn, 'I');\n } else {\n conn_write_bulk(ctx->conn, val, vallen);\n }\n ctx->written = true;\n 
return true;\n}\n\nstatic void execSET(struct conn *conn, const char *cmdname, \n int64_t now, const char *key,\n size_t keylen, const char *val, size_t vallen, int64_t expires, bool nx,\n bool xx, bool get, bool keepttl, uint32_t flags, uint64_t cas, bool withcas)\n{\n stat_cmd_set_incr(conn);\n struct set_entry_context ctx = { .conn = conn, .cmdname = cmdname };\n struct pogocache_store_opts opts = {\n .time = now,\n .expires = expires,\n .cas = cas,\n .flags = flags,\n .keepttl = keepttl,\n .casop = withcas,\n .nx = nx,\n .xx = xx,\n .lowmem = atomic_load_explicit(&lowmem, __ATOMIC_ACQUIRE),\n .entry = get?set_entry:0,\n .udata = get?&ctx:0,\n };\n int status = pogocache_store(cache, key, keylen, val, vallen, &opts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n return;\n }\n if (get) {\n if (!ctx.written) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n pg_write_completef(conn, \"%s 0\", cmdname);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_null(conn);\n }\n }\n return;\n }\n bool stored = status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED;\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (!stored) {\n if (status == POGOCACHE_FOUND) {\n conn_write_raw(conn, \"EXISTS\\r\\n\", 8);\n } else {\n conn_write_raw(conn, \"NOT_FOUND\\r\\n\", 12);\n }\n } else {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n }\n break;\n case PROTO_HTTP:\n if (!stored) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Stored\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"%s %d\", cmdname, stored?1:0);\n pg_write_ready(conn, 'I');\n break;\n default:\n if (!stored) {\n conn_write_null(conn);\n } else {\n conn_write_string(conn, \"OK\");\n }\n break;\n }\n}\n\nstatic int64_t expiry_seconds_time(struct conn *conn, int64_t now, \n 
int64_t expiry)\n{\n if (conn_proto(conn) == PROTO_MEMCACHE && expiry > HOUR*24*30) {\n // Consider Unix time value rather than an offset from current time.\n int64_t unix_ = sys_unixnow();\n if (expiry > unix_) {\n expiry = expiry-sys_unixnow();\n } else {\n expiry = 0;\n }\n }\n return int64_add_clamp(now, expiry);\n}\n\n// SET key value [NX | XX] [GET] [EX seconds | PX milliseconds |\n// EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL] \n// [FLAGS flags] [CAS cas] \nstatic void cmdSET(struct conn *conn, struct args *args) {\n#ifdef CMDSETOK\n // For testing the theoretical top speed of a single SET command.\n // No data is stored.\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw(conn, \"STORED\\r\\n\", 8);\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\n#endif\n // RESP command\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n const char *val = args->bufs[2].data;\n size_t vallen = args->bufs[2].len;\n int64_t expires = 0;\n int exkind = 0;\n bool nx = false;\n bool xx = false;\n bool get = false;\n bool keepttl = false;\n bool hasex = false;\n uint32_t flags = 0;\n uint64_t cas = 0;\n bool withcas = false;\n for (size_t i = 3; i < args->len; i++) {\n if (argeq(args, i, \"ex\")) {\n exkind = 1;\n goto parse_ex;\n } else if (argeq(args, i, \"px\")) {\n exkind = 2;\n goto parse_ex;\n } else if (argeq(args, i, \"exat\")) {\n exkind = 3;\n goto parse_ex;\n } else if (argeq(args, i, \"pxat\")) {\n exkind = 4;\n parse_ex:\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, \n &expires);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n if (expires <= 0) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n // memcache allows for negative expiration\n expires = expiry_seconds_time(conn, now, 0);\n goto 
skip_exkind;\n } else {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n }\n switch (exkind) {\n case 1:\n expires = int64_mul_clamp(expires, SECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 2:\n expires = int64_mul_clamp(expires, MILLISECOND);\n expires = expiry_seconds_time(conn, now, expires);\n break;\n case 3:\n expires = int64_mul_clamp(expires, SECOND);\n break;\n case 4:\n expires = int64_mul_clamp(expires, MILLISECOND);\n break;\n }\n skip_exkind:\n hasex = true;\n } else if (argeq(args, i, \"nx\")) {\n nx = true;\n } else if (argeq(args, i, \"xx\")) {\n xx = true;\n } else if (argeq(args, i, \"get\")) {\n get = true;\n } else if (argeq(args, i, \"keepttl\")) {\n keepttl = true;\n } else if (argeq(args, i, \"flags\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n uint64_t x;\n if (!argu64(args, i, &x)) {\n goto err_syntax;\n }\n flags = x&UINT32_MAX;\n } else if (argeq(args, i, \"cas\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n if (!argu64(args, i, &cas)) {\n goto err_syntax;\n }\n withcas = true;\n } else {\n goto err_syntax;\n }\n }\n assert(expires >= 0);\n if (keepttl && hasex > 0){\n goto err_syntax;\n }\n if (xx && nx > 0){\n goto err_syntax;\n }\n execSET(conn, \"SET\", now, key, keylen, val, vallen, expires, nx, xx, get,\n keepttl, flags, cas, withcas);\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n}\n\nstatic void cmdSETEX(struct conn *conn, struct args *args) {\n if (args->len != 4) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t ex = 0;\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool ok = parse_i64(args->bufs[2].data, args->bufs[2].len, &ex);\n if (!ok || ex <= 0) {\n conn_write_error(conn, \"ERR invalid expire time\");\n return;\n }\n ex = int64_mul_clamp(ex, SECOND);\n ex = int64_add_clamp(sys_now(), ex);\n const char *val = args->bufs[3].data;\n size_t 
vallen = args->bufs[3].len;\n execSET(conn, \"SETEX\", now, key, keylen, val, vallen, ex, 0, 0, 0, 0, 0, 0,\n 0);\n}\n\nstruct get_entry_context {\n struct conn *conn;\n bool cas;\n bool mget;\n};\n\nstatic void get_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)key, (void)keylen, (void)cas;\n (void)shard, (void)time, (void)expires, (void)flags, (void)update;\n struct get_entry_context *ctx = udata;\n int x;\n uint8_t buf[24];\n size_t n;\n switch (conn_proto(ctx->conn)) {\n case PROTO_POSTGRES:;\n char casbuf[24];\n if (ctx->cas) {\n x = 1;\n n = snprintf(casbuf, sizeof(casbuf), \"%\" PRIu64, cas);\n } else {\n x = 0;\n casbuf[0] = '\\0';\n n = 0;\n }\n if (ctx->mget) {\n pg_write_row_data(ctx->conn, (const char*[]){ key, val, casbuf }, \n (size_t[]){ keylen, vallen, n }, 2+x);\n } else {\n pg_write_row_data(ctx->conn, (const char*[]){ val, casbuf }, \n (size_t[]){ vallen, n }, 1+x);\n }\n break;\n case PROTO_MEMCACHE:\n conn_write_raw(ctx->conn, \"VALUE \", 6);\n conn_write_raw(ctx->conn, key, keylen);\n n = u64toa(flags, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n n = u64toa(vallen, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n if (ctx->cas) {\n n = u64toa(cas, buf);\n conn_write_raw(ctx->conn, \" \", 1);\n conn_write_raw(ctx->conn, buf, n);\n }\n conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n conn_write_raw(ctx->conn, val, vallen);\n conn_write_raw(ctx->conn, \"\\r\\n\", 2);\n break;\n case PROTO_HTTP:\n conn_write_http(ctx->conn, 200, \"OK\", val, vallen);\n break;\n default:\n if (ctx->cas) {\n conn_write_array(ctx->conn, 2);\n conn_write_uint(ctx->conn, cas);\n }\n conn_write_bulk(ctx->conn, val, vallen);\n }\n}\n\n// GET key\nstatic void cmdGET(struct conn *conn, struct args *args) {\n stat_cmd_get_incr(conn);\n#ifdef CMDGETNIL\n 
conn_write_null(conn);\n return;\n#endif\n#ifdef CMDSETOK\n conn_write_string(conn, \"$1\\r\\nx\\r\\n\");\n return;\n#endif\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n struct get_entry_context ctx = { \n .conn = conn\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"value\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_HTTP) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\" , -1);\n } else if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 0\");\n } else {\n conn_write_null(conn);\n }\n } else {\n stat_get_hits_incr(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_complete(conn, \"GET 1\");\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_ready(conn, 'I');\n }\n}\n\n// MGET key [key...]\nstatic void cmdMGET(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct get_entry_context ctx = { \n .conn = conn,\n .mget = true,\n .cas = argeq(args, 0, \"mgets\"),\n };\n struct pogocache_load_opts opts = {\n .time = now,\n .entry = get_entry,\n .udata = &ctx,\n };\n int count = 0;\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\", \"value\", \"cas\" }, \n 2+(ctx.cas?1:0));\n } else if (proto == PROTO_RESP) {\n conn_write_array(conn, args->len-1);\n }\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_get_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status 
== POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_null(conn);\n }\n } else {\n count++;\n stat_get_hits_incr(conn);\n }\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"MGET %d\", count);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n }\n}\n\nstruct keys_ctx {\n int64_t now;\n struct buf buf;\n size_t count;\n char *pattern;\n size_t plen;\n};\n\nstatic void keys_ctx_free(struct keys_ctx *ctx) {\n xfree(ctx->pattern);\n buf_clear(&ctx->buf);\n xfree(ctx);\n}\n\n// pattern matcher\n// see https://github.com/tidwall/match.c\nstatic bool match(const char *pat, size_t plen, const char *str, size_t slen,\n int depth)\n{\n if (depth == 128) {\n return false;\n }\n while (plen > 0) {\n if (pat[0] == '\\\\') {\n if (plen == 1) return false;\n pat++; plen--; \n } else if (pat[0] == '*') {\n if (plen == 1) return true;\n if (pat[1] == '*') {\n pat++; plen--;\n continue;\n }\n if (match(pat+1, plen-1, str, slen, depth+1)) return true;\n if (slen == 0) return false;\n str++; slen--;\n continue;\n }\n if (slen == 0) return false;\n if (pat[0] != '?' 
&& str[0] != pat[0]) return false;\n pat++; plen--;\n str++; slen--;\n }\n return slen == 0 && plen == 0;\n}\n\nstatic int keys_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *value, size_t valuelen, int64_t expires, uint32_t flags,\n uint64_t cas, void *udata)\n{\n (void)shard, (void)time, (void)value, (void)valuelen, (void)expires, \n (void)flags, (void)cas;\n struct keys_ctx *ctx = udata;\n if ((ctx->plen == 1 && *ctx->pattern == '*') || \n match(ctx->pattern, ctx->plen, key, keylen, 0))\n {\n buf_append_uvarint(&ctx->buf, keylen);\n buf_append(&ctx->buf, key, keylen);\n ctx->count++;\n }\n return POGOCACHE_ITER_CONTINUE;\n}\n\nstatic void bgkeys_work(void *udata) {\n struct keys_ctx *ctx = udata;\n struct pogocache_iter_opts opts = {\n .time = ctx->now,\n .entry = keys_entry,\n .udata = ctx,\n };\n pogocache_iter(cache, &opts);\n}\n\nstatic void bgkeys_done(struct conn *conn, void *udata) {\n struct keys_ctx *ctx = udata;\n int proto = conn_proto(conn);\n const char *p = ctx->buf.data;\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"key\" }, 1);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n pg_write_row_data(conn, (const char*[]){ key }, \n (size_t[]){ keylen }, 1);\n }\n pg_write_completef(conn, \"KEYS %zu\", ctx->count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_array(conn, ctx->count);\n for (size_t i = 0; i < ctx->count; i++) {\n uint64_t keylen;\n p += varint_read_u64(p, 10, &keylen);\n const char *key = p;\n p += keylen;\n conn_write_bulk(conn, key, keylen);\n }\n }\n keys_ctx_free(ctx);\n}\n\nstatic void cmdKEYS(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *pattern = args->bufs[1].data;\n size_t plen = args->bufs[1].len;\n struct keys_ctx *ctx = xmalloc(sizeof(struct 
keys_ctx));\n memset(ctx, 0, sizeof(struct keys_ctx));\n ctx->pattern = xmalloc(plen+1);\n memcpy(ctx->pattern, pattern, plen);\n ctx->pattern[plen] = '\\0';\n ctx->plen = plen;\n ctx->now = now;\n if (!conn_bgwork(conn, bgkeys_work, bgkeys_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n keys_ctx_free(ctx);\n }\n}\n\nstatic void cmdDEL(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n struct pogocache_delete_opts opts = {\n .time = now,\n };\n int64_t deleted = 0;\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_delete(cache, key, keylen, &opts);\n if (status == POGOCACHE_DELETED) {\n stat_delete_hits_incr(conn);\n deleted++;\n } else {\n stat_delete_misses_incr(conn);\n }\n }\n switch (conn_proto(conn)) {\n case PROTO_MEMCACHE:\n if (deleted == 0) {\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n } else {\n conn_write_raw_cstr(conn, \"DELETED\\r\\n\");\n }\n break;\n case PROTO_HTTP:\n if (deleted == 0) {\n conn_write_http(conn, 404, \"Not Found\", \"Not Found\\r\\n\", -1);\n } else {\n conn_write_http(conn, 200, \"OK\", \"Deleted\\r\\n\", -1);\n }\n break;\n case PROTO_POSTGRES:\n pg_write_completef(conn, \"DEL %\" PRIi64, deleted);\n pg_write_ready(conn, 'I');\n break;\n default:\n conn_write_int(conn, deleted);\n }\n}\n\nstatic void cmdDBSIZE(struct conn *conn, struct args *args) {\n if (args->len != 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n struct pogocache_count_opts opts = { .time = sys_now() };\n size_t count = pogocache_count(cache, &opts);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"count\", count, \"DBSIZE\");\n } else {\n conn_write_int(conn, (int64_t)count);\n }\n}\n\nstruct flushctx { \n pthread_t th;\n int64_t time;\n int start;\n int count;\n};\n\nstatic void 
*thflush(void *arg) {\n struct flushctx *ctx = arg;\n struct pogocache_clear_opts opts = { .time = sys_now(), .oneshard = true };\n for (int i = 0; i < ctx->count; i++) {\n opts.oneshardidx = i+ctx->start;\n pogocache_clear(cache, &opts);\n }\n return 0;\n}\n\nstatic void bgflushwork(void *udata) {\n (void)udata;\n atomic_store(&flush_delay, 0);\n int64_t now = sys_now();\n int nprocs = sys_nprocs();\n if (nprocs > nshards) {\n nprocs = nshards;\n }\n struct flushctx *ctxs = xmalloc(nprocs*sizeof(struct flushctx));\n memset(ctxs, 0, nprocs*sizeof(struct flushctx));\n int start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n ctx->start = start;\n ctx->count = nshards/nprocs;\n ctx->time = now;\n if (i == nprocs-1) {\n ctx->count = nshards-ctx->start;\n }\n if (pthread_create(&ctx->th, 0, thflush, ctx) == -1) {\n ctx->th = 0;\n }\n start += ctx->count;\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n thflush(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct flushctx *ctx = &ctxs[i];\n if (ctx->th != 0) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n}\n\nstatic void bgflushdone(struct conn *conn, void *udata) {\n const char *cmdname = udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s SYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\n// FLUSHALL [SYNC|ASYNC] [DELAY ]\nstatic void cmdFLUSHALL(struct conn *conn, struct args *args) {\n const char *cmdname = \n args_eq(args, 0, \"flush\") ? \"FLUSH\" :\n args_eq(args, 0, \"flushdb\") ? 
\"FLUSHDB\" :\n \"FLUSHALL\";\n stat_cmd_flush_incr(conn);\n bool async = false;\n int64_t delay = 0;\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"async\")) {\n async = true;\n } else if (argeq(args, i, \"sync\")) {\n async = false;\n } else if (argeq(args, i, \"delay\")) {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n bool ok = parse_i64(args->bufs[i].data, args->bufs[i].len, &delay);\n if (!ok) {\n conn_write_error(conn, \"ERR invalid exptime argument\");\n return;\n }\n if (delay > 0) {\n async = true;\n }\n } else {\n goto err_syntax;\n }\n }\n if (async) {\n if (delay < 0) {\n delay = 0;\n }\n delay = int64_mul_clamp(delay, SECOND);\n delay = int64_add_clamp(delay, sys_now());\n atomic_store(&flush_delay, delay);\n // ticker will check the delay and perform the flush\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s ASYNC\", cmdname);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n // Flush database is slow. cmdname is static and thread safe\n conn_bgwork(conn, bgflushwork, bgflushdone, (void*)cmdname);\n return;\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct bgsaveloadctx {\n bool ok; // true = success, false = out of disk space\n bool fast; // use all the proccesing power, otherwise one thread.\n char *path; // path to file\n bool load; // otherwise save\n};\n\nstatic void bgsaveloadwork(void *udata) {\n struct bgsaveloadctx *ctx = udata;\n int64_t start = sys_now();\n int status;\n if (ctx->load) {\n status = load(ctx->path, ctx->fast, 0);\n } else {\n status = save(ctx->path, ctx->fast);\n }\n printf(\". 
%s finished %.3f secs\\n\", ctx->load?\"load\":\"save\", \n (sys_now()-start)/1e9);\n ctx->ok = status == 0;\n}\n\nstatic void bgsaveloaddone(struct conn *conn, void *udata) {\n struct bgsaveloadctx *ctx = udata;\n if (ctx->ok) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s OK\", ctx->load?\"LOAD\":\"SAVE\");\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"OK\\r\\n\");\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (ctx->load) {\n conn_write_error(conn, \"load failed\");\n } else {\n conn_write_error(conn, \"save failed\");\n }\n }\n xfree(ctx->path);\n xfree(ctx);\n}\n\n// SAVE [TO ] [FAST]\n// LOAD [FROM ] [FAST]\nstatic void cmdSAVELOAD(struct conn *conn, struct args *args) {\n bool load = argeq(args, 0, \"load\");\n bool fast = false;\n const char *path = persist;\n size_t plen = strlen(persist);\n for (size_t i = 1; i < args->len; i++) {\n if (argeq(args, i, \"fast\")) {\n fast = true;\n } else if ((load && argeq(args, i, \"from\")) || \n (!load && argeq(args, i, \"to\")))\n {\n i++;\n if (i == args->len) {\n goto err_syntax;\n }\n path = args->bufs[i].data;\n plen = args->bufs[i].len;\n } else {\n goto err_syntax;\n }\n }\n if (plen == 0) {\n conn_write_error(conn, \"ERR path not provided\");\n return;\n }\n struct bgsaveloadctx *ctx = xmalloc(sizeof(struct bgsaveloadctx));\n memset(ctx, 0, sizeof(struct bgsaveloadctx));\n ctx->fast = fast;\n ctx->path = xmalloc(plen+1);\n ctx->load = load;\n memcpy(ctx->path, path, plen);\n ctx->path[plen] = '\\0';\n if (!conn_bgwork(conn, bgsaveloadwork, bgsaveloaddone, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx->path);\n xfree(ctx);\n }\n return;\nerr_syntax:\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\nstruct ttlctx {\n struct conn *conn;\n bool pttl;\n};\n\nstatic void ttl_entry(int shard, int64_t time, const void *key, size_t keylen,\n const void *val, size_t 
vallen, int64_t expires, uint32_t flags,\n uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)key, (void)keylen, (void)val, (void)vallen, (void)flags,\n (void)cas, (void)update;\n struct ttlctx *ctx = udata;\n int64_t ttl;\n if (expires > 0) {\n ttl = expires-time;\n if (ctx->pttl) {\n ttl /= MILLISECOND;\n } else {\n ttl /= SECOND;\n }\n } else {\n ttl = -1;\n }\n if (conn_proto(ctx->conn) == PROTO_POSTGRES) {\n char ttlstr[24];\n size_t n = i64toa(ttl, (uint8_t*)ttlstr);\n pg_write_row_data(ctx->conn, (const char*[]){ ttlstr }, \n (size_t[]){ n }, 1);\n } else {\n conn_write_int(ctx->conn, ttl);\n }\n}\n\nstatic void cmdTTL(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n bool pttl = argeq(args, 0, \"pttl\");\n struct ttlctx ctx = { .conn = conn, .pttl = pttl };\n struct pogocache_load_opts opts = {\n .time = sys_now(),\n .entry = ttl_entry,\n .notouch = true,\n .udata = &ctx,\n };\n int proto = conn_proto(conn);\n if (proto == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ pttl?\"pttl\":\"ttl\" }, 1);\n }\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_NOTFOUND) {\n stat_get_misses_incr(conn);\n if (proto == PROTO_RESP) {\n conn_write_int(conn, -2);\n }\n } else {\n stat_get_hits_incr(conn);\n }\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %d\", pttl?\"PTTL\":\"TTL\",\n status!=POGOCACHE_NOTFOUND);\n pg_write_ready(conn, 'I');\n }\n}\n\nstatic void expire_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)expires, (void)cas;\n struct pogocache_update *ctx = udata;\n ctx->flags = flags;\n ctx->value = 
value;\n ctx->valuelen = valuelen;\n *update = ctx;\n}\n\n// EXPIRE key seconds\n// returns 1 if success or 0 on failure. \nstatic void cmdEXPIRE(struct conn *conn, struct args *args) {\n if (args->len < 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n const char *key = args->bufs[1].data;\n size_t keylen = args->bufs[1].len;\n int64_t expires;\n if (!argi64(args, 2, &expires)) {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n return;\n }\n expires = int64_mul_clamp(expires, POGOCACHE_SECOND);\n expires = int64_add_clamp(now, expires);\n struct pogocache_update ctx = { .expires = expires };\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = expire_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(cache, key, keylen, &lopts);\n int ret = status == POGOCACHE_FOUND;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"EXPIRE %d\", ret);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, ret);\n }\n}\n\n// EXISTS key [key...]\n// Checks if one or more keys exist in the cache.\n// Return the number of keys that exist\nstatic void cmdEXISTS(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t count = 0;\n struct pogocache_load_opts opts = {\n .time = now,\n .notouch = true,\n };\n for (size_t i = 1; i < args->len; i++) {\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n count++;\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_i64_ready(conn, \"exists\", count, \"EXISTS\");\n } else {\n conn_write_int(conn, count);\n }\n}\n\nstatic void sweep_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n size_t swept;\n size_t kept;\n struct pogocache_sweep_opts opts = {\n .time = start,\n };\n printf(\". 
sweep started\\n\");\n pogocache_sweep(cache, &swept, &kept, &opts);\n double elapsed = (sys_now()-start)/1e9;\n printf(\". sweep finished in %.2fs, (swept=%zu, kept=%zu) \\n\", elapsed, \n swept, kept);\n}\n\nstatic void sweep_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thsweep(void *arg) {\n (void)arg;\n sweep_work(0);\n return 0;\n}\n\n// SWEEP [ASYNC]\nstatic void cmdSWEEP(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thsweep, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"SWEEP ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, sweep_work, sweep_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstatic void purge_work(void *udata) {\n (void)udata;\n int64_t start = sys_now();\n printf(\". purge started\\n\");\n xpurge();\n double elapsed = (sys_now()-start)/1e9;\n printf(\". 
purge finished in %.2fs\\n\", elapsed);\n}\n\nstatic void purge_done(struct conn *conn, void *udata) {\n (void)udata;\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE SYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstatic void *thpurge(void *arg) {\n (void)arg;\n purge_work(0);\n return 0;\n}\n\n// PURGE [ASYNC]\nstatic void cmdPURGE(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool async = false;\n if (args->len == 2) {\n if (argeq(args, 1, \"async\")) {\n async = true;\n } else {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n }\n if (async) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thpurge, 0);\n if (ret == -1) {\n conn_write_error(conn, \"ERR failed to do work\");\n return;\n }\n pthread_detach(th);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"PURGE ASYNC\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n } else {\n if (!conn_bgwork(conn, purge_work, purge_done, 0)) {\n conn_write_error(conn, \"ERR failed to do work\");\n }\n }\n}\n\nstruct populate_ctx {\n pthread_t th;\n size_t start;\n size_t count;\n char *prefix;\n size_t prefixlen;\n char *val;\n size_t vallen;\n bool randex;\n int randmin;\n int randmax;\n};\n\nstatic void *populate_entry(void *arg) {\n int64_t now = sys_now();\n struct populate_ctx *ctx = arg;\n char *key = xmalloc(ctx->prefixlen+32);\n memcpy(key, ctx->prefix, ctx->prefixlen);\n key[ctx->prefixlen++] = ':';\n for (size_t i = ctx->start; i < ctx->start+ctx->count; i++) {\n size_t n = i64toa(i, (uint8_t*)(key+ctx->prefixlen));\n size_t keylen = ctx->prefixlen+n;\n struct pogocache_store_opts opts = { \n .time = now,\n };\n if (ctx->randex) {\n int ex = (rand()%(ctx->randmax-ctx->randmin))+ctx->randmin;\n opts.ttl = ex*POGOCACHE_SECOND;\n }\n pogocache_store(cache, key, keylen, ctx->val, ctx->vallen, 
&opts);\n }\n xfree(key);\n return 0;\n}\n\n// DEBUG POPULATE [rand-ex-range]\n// DEBUG POPULATE \n// DEBUG POPULATE 1000000 test 16\n// DEBUG POPULATE 1000000 test 16 5-10\nstatic void cmdDEBUG_populate(struct conn *conn, struct args *args) {\n if (args->len != 4 && args->len != 5) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t count;\n if (!argi64(args, 1, &count) || count < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n size_t prefixlen = args->bufs[2].len;\n char *prefix = args->bufs[2].data;\n int64_t vallen;\n if (!argi64(args, 3, &vallen) || vallen < 0) {\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n bool randex = false;\n int randmin = 0;\n int randmax = 0;\n if (args->len == 5) {\n size_t exlen = args->bufs[4].len;\n char *aex = args->bufs[4].data;\n char *ex = xmalloc(exlen+1);\n memcpy(ex, aex, exlen);\n ex[exlen] = '\\0';\n if (strchr(ex, '-')) {\n randmin = atoi(ex);\n randmax = atoi(strchr(ex, '-')+1);\n randex = true;\n }\n xfree(ex);\n }\n\n char *val = xmalloc(vallen);\n memset(val, 0, vallen);\n int nprocs = sys_nprocs();\n if (nprocs < 0) {\n nprocs = 1;\n }\n struct populate_ctx *ctxs = xmalloc(nprocs*sizeof(struct populate_ctx));\n memset(ctxs, 0, nprocs*sizeof(struct populate_ctx));\n size_t group = count/nprocs;\n size_t start = 0;\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n ctx->start = start;\n if (i == nprocs-1) {\n ctx->count = count-start;\n } else {\n ctx->count = group;\n }\n ctx->prefix = prefix;\n ctx->prefixlen = prefixlen;\n ctx->val = val;\n ctx->vallen = vallen;\n ctx->randex = randex;\n ctx->randmin = randmin;\n ctx->randmax = randmax;\n if (pthread_create(&ctx->th, 0, populate_entry, ctx) == -1) {\n ctx->th = 0;\n }\n start += group;\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = &ctxs[i];\n if (ctx->th == 0) {\n populate_entry(ctx);\n }\n }\n for (int i = 0; i < nprocs; i++) {\n struct populate_ctx *ctx = 
&ctxs[i];\n if (ctx->th) {\n pthread_join(ctx->th, 0);\n }\n }\n xfree(ctxs);\n xfree(val);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"DEBUG POPULATE %\" PRIi64, count);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n}\n\nstruct dbg_detach_ctx {\n int64_t now;\n int64_t then;\n};\n\nstatic void detach_work(void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n ctx->then = sys_now();\n // printf(\". ----- DELAY START\\n\");\n // sleep(1);\n // printf(\". ----- DELAY END\\n\");\n}\n\nstatic void detach_done(struct conn *conn, void *udata) {\n struct dbg_detach_ctx *ctx = udata;\n char buf[128];\n snprintf(buf, sizeof(buf), \"%\" PRId64 \":%\" PRId64, ctx->now, ctx->then);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_simple_row_str_ready(conn, \"detach\", buf, \"DEBUG DETACH\");\n } else {\n conn_write_bulk_cstr(conn, buf);\n }\n xfree(ctx);\n}\n\n// DEBUG detach\nstatic void cmdDEBUG_detach(struct conn *conn, struct args *args) {\n (void)args;\n struct dbg_detach_ctx *ctx = xmalloc(sizeof(struct dbg_detach_ctx));\n memset(ctx, 0,sizeof(struct dbg_detach_ctx));\n ctx->now = sys_now();\n if (!conn_bgwork(conn, detach_work, detach_done, ctx)) {\n conn_write_error(conn, \"ERR failed to do work\");\n xfree(ctx);\n }\n}\n\n// DEBUG subcommand (args...)\nstatic void cmdDEBUG(struct conn *conn, struct args *args) {\n if (args->len <= 1) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n // args = args[1:]\n args = &(struct args){ .bufs = args->bufs+1, .len = args->len-1 };\n if (argeq(args, 0, \"populate\")) {\n cmdDEBUG_populate(conn, args);\n } else if (argeq(args, 0, \"detach\")) {\n cmdDEBUG_detach(conn, args);\n } else {\n conn_write_error(conn, \"ERR unknown subcommand\");\n }\n}\n\nstatic void cmdECHO(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n 
pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"ECHO\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n}\n\nstatic void cmdPING(struct conn *conn, struct args *args) {\n if (args->len > 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n if (args->len == 1) {\n pg_write_simple_row_str_ready(conn, \"message\", \"PONG\", \"PING\"); \n } else {\n pg_write_simple_row_data_ready(conn, \"message\", args->bufs[1].data, \n args->bufs[1].len, \"PING\");\n }\n } else {\n if (args->len == 1) {\n conn_write_string(conn, \"PONG\");\n } else {\n conn_write_bulk(conn, args->bufs[1].data, args->bufs[1].len);\n }\n }\n}\n\nstatic void cmdQUIT(struct conn *conn, struct args *args) {\n (void)args;\n if (conn_proto(conn) == PROTO_RESP) {\n conn_write_string(conn, \"OK\");\n }\n conn_close(conn);\n}\n\n// TOUCH key [key...]\nstatic void cmdTOUCH(struct conn *conn, struct args *args) {\n if (args->len < 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int64_t now = sys_now();\n int64_t touched = 0;\n struct pogocache_load_opts opts = { \n .time = now,\n };\n for (size_t i = 1; i < args->len; i++) {\n stat_cmd_touch_incr(conn);\n const char *key = args->bufs[i].data;\n size_t keylen = args->bufs[i].len;\n int status = pogocache_load(cache, key, keylen, &opts);\n if (status == POGOCACHE_FOUND) {\n stat_touch_hits_incr(conn);\n touched++;\n } else {\n stat_touch_misses_incr(conn);\n }\n }\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_completef(conn, \"TOUCH %\" PRIi64, touched);\n pg_write_ready(conn, 'I');\n } else {\n conn_write_int(conn, touched);\n }\n}\n\nstruct get64ctx {\n bool ok;\n bool isunsigned;\n union {\n int64_t ival;\n uint64_t uval;\n };\n int64_t expires;\n uint32_t flags;\n uint64_t cas;\n};\n\nunion delta { \n uint64_t u;\n int64_t i;\n};\n\nstatic void get64(int shard, int64_t time, const void *key,\n 
size_t keylen, const void *val, size_t vallen, int64_t expires,\n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update;\n struct get64ctx *ctx = udata;\n ctx->flags = flags;\n ctx->expires = expires;\n ctx->cas = cas;\n if (ctx->isunsigned) {\n ctx->ok = parse_u64(val, vallen, &ctx->uval);\n } else {\n ctx->ok = parse_i64(val, vallen, &ctx->ival);\n }\n}\n\nstatic void execINCRDECR(struct conn *conn, const char *key, size_t keylen, \n union delta delta, bool decr, bool isunsigned, const char *cmdname)\n{\n bool hit = false;\n bool miss = false;\n int64_t now = sys_now();\n struct get64ctx ctx = { .isunsigned = isunsigned };\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts gopts = {\n .time = now,\n .entry = get64,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &gopts);\n bool found = status == POGOCACHE_FOUND;\n if (found && !ctx.ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR cannot increment or \"\n \"decrement non-numeric value\\r\\n\");\n goto done;\n }\n goto fail_value_non_numeric;\n } else if (!found && conn_proto(conn) == PROTO_MEMCACHE) {\n miss = true;\n conn_write_raw_cstr(conn, \"NOT_FOUND\\r\\n\");\n goto done;\n }\n // add or subtract\n bool overflow;\n if (isunsigned) {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.uval, delta.u, &ctx.uval);\n } else {\n overflow = __builtin_add_overflow(ctx.uval, delta.u, &ctx.uval);\n }\n } else {\n if (decr) {\n overflow = __builtin_sub_overflow(ctx.ival, delta.i, &ctx.ival);\n } else {\n overflow = __builtin_add_overflow(ctx.ival, delta.i, &ctx.ival);\n }\n }\n if (overflow && conn_proto(conn) != PROTO_MEMCACHE) {\n goto fail_overflow;\n }\n // re-set the value\n char val[24];\n size_t vallen;\n if (isunsigned) {\n vallen = u64toa(ctx.uval, (uint8_t*)val);\n } else {\n vallen = i64toa(ctx.ival, (uint8_t*)val);\n 
}\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires, \n .flags = ctx.flags, \n .cas = ctx.cas,\n .udata = &ctx,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n if (status == POGOCACHE_NOMEM) {\n stat_store_no_memory_incr(conn);\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n char val[24];\n if (isunsigned) {\n snprintf(val, sizeof(val), \"%\" PRIu64, ctx.uval);\n } else {\n snprintf(val, sizeof(val), \"%\" PRIi64, ctx.ival);\n }\n pg_write_simple_row_str_readyf(conn, \"value\", val, \"%s\", cmdname);\n } else {\n if (isunsigned) {\n conn_write_uint(conn, ctx.uval);\n } else {\n conn_write_int(conn, ctx.ival);\n }\n }\n hit = true;\n goto done;\nfail_value_non_numeric:\n conn_write_error(conn, ERR_INVALID_INTEGER);\n goto done;\nfail_overflow:\n conn_write_error(conn, \"ERR increment or decrement would overflow\");\n goto done;\ndone:\n if (hit) {\n if (decr) {\n stat_decr_hits_incr(conn);\n } else {\n stat_incr_hits_incr(conn);\n }\n } else if (miss) {\n if (decr) {\n stat_decr_misses_incr(conn);\n } else {\n stat_incr_misses_incr(conn);\n }\n }\n pogocache_end(batch);\n}\n\nstatic void cmdINCRDECRBY(struct conn *conn, struct args *args, \n bool decr, const char *cmdname)\n{\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta;\n bool ok;\n if (isunsigned) {\n ok = argu64(args, 2, &delta.u);\n } else {\n ok = argi64(args, 2, &delta.i);\n }\n if (!ok) {\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"CLIENT_ERROR invalid numeric delta \"\n \"argument\\r\\n\");\n } else {\n conn_write_error(conn, ERR_INVALID_INTEGER);\n }\n return;\n }\n execINCRDECR(conn, key, keylen, delta, decr, 
isunsigned, cmdname);\n}\n\n// DECRBY key num\nstatic void cmdDECRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, true, \"DECRBY\");\n}\n\n// INCRBY key num\nstatic void cmdINCRBY(struct conn *conn, struct args *args) {\n cmdINCRDECRBY(conn, args, false, \"INCRBY\");\n}\n\n// DECR key\nstatic void cmdDECR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, true, isunsigned, \"DECR\");\n}\n\n// INCR key\nstatic void cmdINCR(struct conn *conn, struct args *args) {\n if (args->len != 2) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n bool isunsigned = tolower(args->bufs[0].data[0]) == 'u';\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n union delta delta = { .i = 1 };\n execINCRDECR(conn, key, keylen, delta, false, isunsigned, \"INCR\");\n}\n\nstruct appendctx {\n bool prepend;\n uint32_t flags;\n int64_t expires;\n const char *val;\n size_t vallen;\n char *outval;\n size_t outvallen;\n};\n\nstatic void append_entry(int shard, int64_t time, const void *key,\n size_t keylen, const void *val, size_t vallen, int64_t expires, \n uint32_t flags, uint64_t cas, struct pogocache_update **update, void *udata)\n{\n (void)shard, (void)time, (void)key, (void)keylen, (void)update, (void)cas;\n struct appendctx *ctx = udata;\n ctx->expires = expires;\n ctx->flags = flags;\n ctx->outvallen = vallen+ctx->vallen;\n ctx->outval = xmalloc(ctx->outvallen);\n if (ctx->prepend) {\n memcpy(ctx->outval, ctx->val, ctx->vallen);\n memcpy(ctx->outval+ctx->vallen, val, vallen);\n } else {\n memcpy(ctx->outval, val, vallen);\n memcpy(ctx->outval+vallen, ctx->val, ctx->vallen);\n }\n}\n\n// APPEND \nstatic void cmdAPPEND(struct conn *conn, struct args *args) {\n int64_t now = 
sys_now();\n if (args->len != 3) {\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n int proto = conn_proto(conn);\n bool prepend = argeq(args, 0, \"prepend\");\n size_t keylen;\n const char *key = args_at(args, 1, &keylen);\n size_t vallen;\n const char *val = args_at(args, 2, &vallen);\n struct appendctx ctx = { \n .prepend = prepend,\n .val = val,\n .vallen = vallen,\n };\n size_t len;\n // Use a batch transaction for key isolation.\n struct pogocache *batch = pogocache_begin(cache);\n struct pogocache_load_opts lopts = { \n .time = now,\n .entry = append_entry,\n .udata = &ctx,\n };\n int status = pogocache_load(batch, key, keylen, &lopts);\n if (status == POGOCACHE_NOTFOUND) {\n if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"NOT_STORED\\r\\n\");\n goto done;\n }\n len = vallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n };\n status = pogocache_store(batch, key, keylen, val, vallen, &sopts);\n } else {\n if (ctx.outvallen > MAXARGSZ) {\n // do not let values become larger than 500MB\n xfree(ctx.outval);\n conn_write_error(conn, \"ERR value too large\");\n goto done;\n }\n len = ctx.outvallen;\n struct pogocache_store_opts sopts = {\n .time = now,\n .expires = ctx.expires,\n .flags = ctx.flags,\n };\n status = pogocache_store(batch, key, keylen, ctx.outval, ctx.outvallen, \n &sopts);\n xfree(ctx.outval);\n }\n if (status == POGOCACHE_NOMEM) {\n conn_write_error(conn, ERR_OUT_OF_MEMORY);\n goto done;\n }\n assert(status == POGOCACHE_INSERTED || status == POGOCACHE_REPLACED);\n if (proto == PROTO_POSTGRES) {\n pg_write_completef(conn, \"%s %zu\", prepend?\"PREPEND\":\"APPEND\", len);\n pg_write_ready(conn, 'I');\n } else if (proto == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \"STORED\\r\\n\");\n } else {\n conn_write_int(conn, len);\n }\ndone:\n pogocache_end(batch);\n}\n\nstatic void cmdPREPEND(struct conn *conn, struct args *args) {\n cmdAPPEND(conn, args);\n}\n\nstatic void cmdAUTH(struct conn *conn, struct args 
*args) {\n stat_auth_cmds_incr(0);\n if (!argeq(args, 0, \"auth\")) {\n stat_auth_errors_incr(0);\n goto noauth;\n }\n if (args->len == 3) {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n if (args->len > 3) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n }\n if (args->len == 1) {\n stat_auth_errors_incr(0);\n conn_write_error(conn, ERR_WRONG_NUM_ARGS);\n return;\n }\n if (args->bufs[1].len != strlen(auth) || \n memcmp(auth, args->bufs[1].data, args->bufs[1].len) != 0)\n {\n stat_auth_errors_incr(0);\n goto wrongpass;\n }\n conn_setauth(conn, true);\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_complete(conn, \"AUTH OK\");\n pg_write_ready(conn, 'I');\n } else {\n conn_write_string(conn, \"OK\");\n }\n return;\nnoauth:\n if (conn_proto(conn) == PROTO_MEMCACHE) {\n conn_write_raw_cstr(conn, \n \"CLIENT_ERROR Authentication required\\r\\n\");\n } else {\n conn_write_error(conn, \"NOAUTH Authentication required.\");\n }\n return;\nwrongpass:\n conn_write_error(conn, \n \"WRONGPASS invalid username-password pair or user is disabled.\");\n}\n\nstruct stats {\n // use the args type as a list.\n struct args args;\n};\n\nstatic void stats_begin(struct stats *stats) {\n memset(stats, 0, sizeof(struct stats));\n}\n\nstatic void stats_end(struct stats *stats, struct conn *conn) {\n if (conn_proto(conn) == PROTO_POSTGRES) {\n pg_write_row_desc(conn, (const char*[]){ \"stat\", \"value\" }, 2);\n for (size_t i = 0; i < stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n pg_write_row_data(conn, (const char*[]){ stat, val }, \n (size_t[]){ strlen(stat), strlen(val) }, 2);\n }\n pg_write_completef(conn, \"STATS %zu\", stats->args.len);\n pg_write_ready(conn, 'I');\n } else if (conn_proto(conn) == PROTO_MEMCACHE) {\n char line[512];\n for (size_t i = 0; i < 
stats->args.len; i++) {\n char *stat = stats->args.bufs[i].data;\n size_t n = snprintf(line, sizeof(line), \"STAT %s\\r\\n\", stat);\n conn_write_raw(conn, line, n);\n }\n conn_write_raw_cstr(conn, \"END\\r\\n\");\n } else {\n conn_write_array(conn, stats->args.len);\n for (size_t i = 0; i < stats->args.len; i++) {\n conn_write_array(conn, 2);\n char *key = stats->args.bufs[i].data;\n char *space = strchr(key, ' ');\n char *val = \"\";\n if (space) {\n *space = '\\0';\n val = space+1;\n }\n conn_write_bulk_cstr(conn, key);\n conn_write_bulk_cstr(conn, val);\n }\n }\n args_free(&stats->args);\n}\n\nstatic void stats_printf(struct stats *stats, const char *format, ...) {\n // initializing list pointer\n char line[512];\n va_list ap;\n va_start(ap, format);\n size_t len = vsnprintf(line, sizeof(line)-1, format, ap);\n va_end(ap);\n args_append(&stats->args, line, len+1, false); // include null-terminator\n}\n\nstatic void stats(struct conn *conn) {\n struct stats stats;\n stats_begin(&stats);\n stats_printf(&stats, \"pid %d\", getpid());\n stats_printf(&stats, \"uptime %.0f\", (sys_now()-procstart)/1e9);\n stats_printf(&stats, \"time %.0f\", sys_unixnow()/1e9);\n stats_printf(&stats, \"product %s\", \"pogocache\");\n stats_printf(&stats, \"version %s\", version);\n stats_printf(&stats, \"githash %s\", githash);\n stats_printf(&stats, \"pointer_size %zu\", sizeof(uintptr_t)*8);\n struct rusage usage;\n if (getrusage(RUSAGE_SELF, &usage) == 0) {\n stats_printf(&stats, \"rusage_user %ld.%06ld\",\n usage.ru_utime.tv_sec, usage.ru_utime.tv_usec);\n stats_printf(&stats, \"rusage_system %ld.%06ld\",\n usage.ru_stime.tv_sec, usage.ru_stime.tv_usec);\n }\n stats_printf(&stats, \"max_connections %zu\", maxconns);\n stats_printf(&stats, \"curr_connections %zu\", net_nconns());\n stats_printf(&stats, \"total_connections %zu\", net_tconns());\n stats_printf(&stats, \"rejected_connections %zu\", net_rconns());\n stats_printf(&stats, \"cmd_get %\" PRIu64, stat_cmd_get());\n 
stats_printf(&stats, \"cmd_set %\" PRIu64, stat_cmd_set());\n stats_printf(&stats, \"cmd_flush %\" PRIu64, stat_cmd_flush());\n stats_printf(&stats, \"cmd_touch %\" PRIu64, stat_cmd_touch());\n stats_printf(&stats, \"get_hits %\" PRIu64, stat_get_hits());\n stats_printf(&stats, \"get_misses %\" PRIu64, stat_get_misses());\n stats_printf(&stats, \"delete_misses %\" PRIu64, stat_delete_misses());\n stats_printf(&stats, \"delete_hits %\" PRIu64, stat_delete_hits());\n stats_printf(&stats, \"incr_misses %\" PRIu64, stat_incr_misses());\n stats_printf(&stats, \"incr_hits %\" PRIu64, stat_incr_hits());\n stats_printf(&stats, \"decr_misses %\" PRIu64, stat_decr_misses());\n stats_printf(&stats, \"decr_hits %\" PRIu64, stat_decr_hits());\n stats_printf(&stats, \"touch_hits %\" PRIu64, stat_touch_hits());\n stats_printf(&stats, \"touch_misses %\" PRIu64, stat_touch_misses());\n stats_printf(&stats, \"store_too_large %\" PRIu64, stat_store_too_large());\n stats_printf(&stats, \"store_no_memory %\" PRIu64, stat_store_no_memory());\n stats_printf(&stats, \"auth_cmds %\" PRIu64, stat_auth_cmds());\n stats_printf(&stats, \"auth_errors %\" PRIu64, stat_auth_errors());\n stats_printf(&stats, \"threads %d\", nthreads);\n struct sys_meminfo meminfo;\n sys_getmeminfo(&meminfo);\n stats_printf(&stats, \"rss %zu\", meminfo.rss);\n struct pogocache_size_opts sopts = { .entriesonly=true };\n stats_printf(&stats, \"bytes %zu\", pogocache_size(cache, &sopts));\n stats_printf(&stats, \"curr_items %zu\", pogocache_count(cache, 0));\n stats_printf(&stats, \"total_items %\" PRIu64, pogocache_total(cache, 0));\n stats_end(&stats, conn);\n}\n\nstatic void cmdSTATS(struct conn *conn, struct args *args) {\n if (args->len == 1) {\n return stats(conn);\n }\n conn_write_error(conn, ERR_SYNTAX_ERROR);\n return;\n}\n\n// Commands hash table. 
Lazy loaded per thread.\n// Simple open addressing using case-insensitive fnv1a hashes.\nstatic int nbuckets;\nstatic struct cmd *buckets;\n\nstruct cmd {\n const char *name;\n void (*func)(struct conn *conn, struct args *args);\n};\n\nstatic struct cmd cmds[] = {\n { \"set\", cmdSET }, // pg\n { \"get\", cmdGET }, // pg\n { \"del\", cmdDEL }, // pg\n { \"mget\", cmdMGET }, // pg\n { \"mgets\", cmdMGET }, // pg cas detected\n { \"ttl\", cmdTTL }, // pg\n { \"pttl\", cmdTTL }, // pg\n { \"expire\", cmdEXPIRE }, // pg\n { \"setex\", cmdSETEX }, // pg\n { \"dbsize\", cmdDBSIZE }, // pg\n { \"quit\", cmdQUIT }, // pg\n { \"echo\", cmdECHO }, // pg\n { \"exists\", cmdEXISTS }, // pg\n { \"flushdb\", cmdFLUSHALL }, // pg\n { \"flushall\", cmdFLUSHALL }, // pg\n { \"flush\", cmdFLUSHALL }, // pg\n { \"purge\", cmdPURGE }, // pg\n { \"sweep\", cmdSWEEP }, // pg\n { \"keys\", cmdKEYS }, // pg\n { \"ping\", cmdPING }, // pg\n { \"touch\", cmdTOUCH }, // pg\n { \"debug\", cmdDEBUG }, // pg\n { \"incrby\", cmdINCRBY }, // pg\n { \"decrby\", cmdDECRBY }, // pg\n { \"incr\", cmdINCR }, // pg\n { \"decr\", cmdDECR }, // pg\n { \"uincrby\", cmdINCRBY }, // pg unsigned detected in signed operation\n { \"udecrby\", cmdDECRBY }, // pg unsigned detected in signed operation\n { \"uincr\", cmdINCR }, // pg unsigned detected in signed operation\n { \"udecr\", cmdDECR }, // pg unsigned detected in signed operation\n { \"append\", cmdAPPEND }, // pg\n { \"prepend\", cmdPREPEND }, // pg\n { \"auth\", cmdAUTH }, // pg\n { \"save\", cmdSAVELOAD }, // pg\n { \"load\", cmdSAVELOAD }, // pg\n { \"stats\", cmdSTATS }, // pg memcache style stats\n};\n\nstatic void build_commands_table(void) {\n static __thread bool buckets_ready = false;\n static pthread_mutex_t cmd_build_lock = PTHREAD_MUTEX_INITIALIZER;\n static bool built = false;\n if (!buckets_ready) {\n pthread_mutex_lock(&cmd_build_lock);\n if (!built) {\n int ncmds = sizeof(cmds)/sizeof(struct cmd);\n int n = ncmds*8;\n nbuckets = 2;\n 
while (nbuckets < n) {\n nbuckets *= 2;\n }\n buckets = xmalloc(nbuckets*sizeof(struct cmd));\n memset(buckets, 0, nbuckets*sizeof(struct cmd));\n uint64_t hash;\n for (int i = 0; i < ncmds; i++) {\n hash = fnv1a_case(cmds[i].name, strlen(cmds[i].name));\n for (int j = 0; j < nbuckets; j++) {\n int k = (j+hash)&(nbuckets-1);\n if (!buckets[k].name) {\n buckets[k] = cmds[i];\n break;\n }\n }\n }\n built = true;\n }\n pthread_mutex_unlock(&cmd_build_lock);\n buckets_ready = true;\n }\n}\n\nstatic struct cmd *get_cmd(const char *name, size_t namelen) {\n build_commands_table();\n uint32_t hash = fnv1a_case(name, namelen);\n int j = hash&(nbuckets-1);\n while (1) {\n if (!buckets[j].name) {\n return 0;\n }\n if (argeq_bytes(name, namelen, buckets[j].name)) {\n return &buckets[j];\n }\n j++;\n }\n}\n\nvoid evcommand(struct conn *conn, struct args *args) {\n if (useauth && !conn_auth(conn)) {\n if (conn_proto(conn) == PROTO_HTTP) {\n // Let HTTP traffic through.\n // The request has already been authorized in http.c\n } else {\n cmdAUTH(conn, args);\n return;\n }\n }\n if (verb > 1) {\n if (!argeq(args, 0, \"auth\")) {\n args_print(args);\n }\n }\n struct cmd *cmd = get_cmd(args->bufs[0].data, args->bufs[0].len);\n if (cmd) {\n cmd->func(conn, args);\n } else {\n if (verb > 0) {\n printf(\"# Unknown command '%.*s'\\n\", (int)args->bufs[0].len,\n args->bufs[0].data);\n }\n char errmsg[128];\n snprintf(errmsg, sizeof(errmsg), \"ERR unknown command '%.*s'\", \n (int)args->bufs[0].len, args->bufs[0].data);\n conn_write_error(conn, errmsg);\n }\n}\n"], ["/pogocache/src/net.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit net.c provides most network functionality, including listening on ports,\n// thread creation, event queue handling, and reading & writing sockets.\n#define _GNU_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifdef __linux__\n#include \n#include \n#include \n#include \n#else\n#include \n#endif\n\n#include \"uring.h\"\n#include \"stats.h\"\n#include \"net.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"xmalloc.h\"\n\n#define PACKETSIZE 16384\n#define MINURINGEVENTS 2 // there must be at least 2 events for uring use\n\nextern const int verb;\n\nstatic int setnonblock(int fd) {\n int flags = fcntl(fd, F_GETFL, 0);\n if (flags == -1) {\n return -1;\n }\n return fcntl(fd, F_SETFL, flags | O_NONBLOCK);\n}\n\nstatic int settcpnodelay(int fd, bool nodelay) {\n int val = nodelay;\n return setsockopt(fd, SOL_SOCKET, TCP_NODELAY, &val, sizeof(val)) == 0;\n}\n\nstatic int setquickack(int fd, bool quickack) {\n#if defined(__linux__)\n int val = quickack;\n return setsockopt(fd, SOL_SOCKET, TCP_QUICKACK, &val, sizeof(val)) == 0;\n#else\n (void)fd, (void)quickack;\n return 0;\n#endif\n}\n\nstatic int setkeepalive(int fd, bool keepalive) {\n int val = keepalive;\n if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val))) {\n return -1;\n }\n#if defined(__linux__)\n if (!keepalive) {\n return 0;\n }\n // tcp_keepalive_time\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &(int){300}, sizeof(int))) \n {\n return -1;\n }\n // tcp_keepalive_intvl\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&(int){30}, sizeof(int)))\n {\n return -1;\n }\n // tcp_keepalive_probes\n if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &(int){3}, sizeof(int))) {\n return -1;\n }\n#endif\n return 0;\n}\n\n#ifdef __linux__\ntypedef struct epoll_event event_t;\n#else\ntypedef struct kevent event_t;\n#endif\n\nstatic int event_fd(event_t *ev) {\n#ifdef __linux__\n return ev->data.fd;\n#else\n return ev->ident;\n#endif\n}\n\nstatic int getevents(int fd, event_t evs[], int nevs, bool wait_forever, \n int64_t timeout)\n{\n if (wait_forever) {\n#ifdef __linux__\n return epoll_wait(fd, evs, nevs, -1);\n#else\n return kevent(fd, NULL, 0, evs, nevs, 0);\n#endif\n } else {\n timeout = timeout < 0 ? 0 : \n timeout > 900000000 ? 900000000 : // 900ms\n timeout;\n#ifdef __linux__\n timeout = timeout / 1000000;\n return epoll_wait(fd, evs, nevs, timeout);\n#else\n struct timespec timespec = { .tv_nsec = timeout };\n return kevent(fd, NULL, 0, evs, nevs, ×pec);\n#endif\n }\n}\n\nstatic int addread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN | EPOLLEXCLUSIVE;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delread(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLIN;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_READ,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int addwrite(int qfd, int fd) {\n#ifdef __linux__\n struct epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_ADD,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int delwrite(int qfd, int fd) {\n#ifdef __linux__\n struct 
epoll_event ev = { 0 };\n ev.events = EPOLLOUT;\n ev.data.fd = fd;\n return epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &ev);\n#else\n struct kevent ev={.filter=EVFILT_WRITE,.flags=EV_DELETE,.ident=(fd)};\n return kevent(qfd, &ev, 1, NULL, 0, NULL);\n#endif\n}\n\nstatic int evqueue(void) {\n#ifdef __linux__\n return epoll_create1(0);\n#else\n return kqueue();\n#endif\n}\n\nstruct bgworkctx { \n void (*work)(void *udata);\n void (*done)(struct net_conn *conn, void *udata);\n struct net_conn *conn;\n void *udata;\n bool writer;\n};\n\n// static void bgdone(struct bgworkctx *bgctx);\n\nstruct net_conn {\n int fd;\n struct net_conn *next; // for hashmap bucket\n bool closed;\n struct tls *tls;\n void *udata;\n char *out;\n size_t outlen;\n size_t outcap;\n struct bgworkctx *bgctx;\n struct qthreadctx *ctx;\n unsigned stat_cmd_get;\n unsigned stat_cmd_set;\n unsigned stat_get_hits;\n unsigned stat_get_misses;\n};\n\nstatic struct net_conn *conn_new(int fd, struct qthreadctx *ctx) {\n struct net_conn *conn = xmalloc(sizeof(struct net_conn));\n memset(conn, 0, sizeof(struct net_conn));\n conn->fd = fd;\n conn->ctx = ctx;\n return conn;\n}\n\nstatic void conn_free(struct net_conn *conn) {\n if (conn) {\n if (conn->out) {\n xfree(conn->out);\n }\n xfree(conn);\n }\n}\n\nvoid net_conn_out_ensure(struct net_conn *conn, size_t amount) {\n if (conn->outcap-conn->outlen >= amount) {\n return;\n }\n size_t cap = conn->outcap == 0 ? 
16 : conn->outcap * 2;\n while (cap-conn->outlen < amount) {\n cap *= 2;\n }\n char *out = xmalloc(cap);\n memcpy(out, conn->out, conn->outlen);\n xfree(conn->out);\n conn->out = out;\n conn->outcap = cap;\n}\n\nvoid net_conn_out_write_byte_nocheck(struct net_conn *conn, char byte) {\n conn->out[conn->outlen++] = byte;\n}\n\nvoid net_conn_out_write_byte(struct net_conn *conn, char byte) {\n if (conn->outcap == conn->outlen) {\n net_conn_out_ensure(conn, 1);\n }\n net_conn_out_write_byte_nocheck(conn, byte);\n}\n\nvoid net_conn_out_write_nocheck(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n memcpy(conn->out+conn->outlen, data, nbytes);\n conn->outlen += nbytes;\n}\n\nvoid net_conn_out_write(struct net_conn *conn, const void *data,\n size_t nbytes)\n{\n if (conn->outcap-conn->outlen < nbytes) {\n net_conn_out_ensure(conn, nbytes);\n }\n net_conn_out_write_nocheck(conn, data, nbytes);\n}\n\nchar *net_conn_out(struct net_conn *conn) {\n return conn->out;\n}\n\nsize_t net_conn_out_len(struct net_conn *conn) {\n return conn->outlen;\n}\n\nsize_t net_conn_out_cap(struct net_conn *conn) {\n return conn->outcap;\n}\n\nvoid net_conn_out_setlen(struct net_conn *conn, size_t len) {\n assert(len < conn->outcap);\n conn->outlen = len;\n}\n\n\nbool net_conn_isclosed(struct net_conn *conn) {\n return conn->closed;\n}\n\nvoid net_conn_close(struct net_conn *conn) {\n conn->closed = true;\n}\n\nvoid net_conn_setudata(struct net_conn *conn, void *udata) {\n conn->udata = udata;\n}\n\nvoid *net_conn_udata(struct net_conn *conn) {\n return conn->udata;\n}\n\nstatic uint64_t hashfd(int fd) {\n return mix13((uint64_t)fd);\n}\n\n// map of connections\nstruct cmap {\n struct net_conn **buckets;\n size_t nbuckets;\n size_t len;\n};\n\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn);\n\nstatic void cmap_grow(struct cmap *cmap) {\n struct cmap cmap2 = { 0 };\n cmap2.nbuckets = cmap->nbuckets*2;\n size_t size = cmap2.nbuckets * sizeof(struct net_conn*);\n 
cmap2.buckets = xmalloc(size);\n memset(cmap2.buckets, 0, cmap2.nbuckets*sizeof(struct net_conn*));\n for (size_t i = 0; i < cmap->nbuckets; i++) {\n struct net_conn *conn = cmap->buckets[i];\n while (conn) {\n struct net_conn *next = conn->next;\n conn->next = 0;\n cmap_insert(&cmap2, conn);\n conn = next;\n }\n }\n xfree(cmap->buckets);\n memcpy(cmap, &cmap2, sizeof(struct cmap));\n}\n\n// Insert a connection into a map. \n// The connection MUST NOT exist in the map.\nstatic void cmap_insert(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n if (cmap->len >= cmap->nbuckets-(cmap->nbuckets>>2)) { // 75% load factor\n // if (cmap->len >= cmap->nbuckets) { // 100% load factor\n cmap_grow(cmap);\n }\n size_t i = hash % cmap->nbuckets;\n conn->next = cmap->buckets[i];\n cmap->buckets[i] = conn;\n cmap->len++;\n}\n\n// Return the connection or NULL if not exists.\nstatic struct net_conn *cmap_get(struct cmap *cmap, int fd) {\n uint32_t hash = hashfd(fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *conn = cmap->buckets[i];\n while (conn && conn->fd != fd) {\n conn = conn->next;\n }\n return conn;\n}\n\n// Delete connection from map. 
\n// The connection MUST exist in the map.\nstatic void cmap_delete(struct cmap *cmap, struct net_conn *conn) {\n uint32_t hash = hashfd(conn->fd);\n size_t i = hash % cmap->nbuckets;\n struct net_conn *prev = 0;\n struct net_conn *iter = cmap->buckets[i];\n while (iter != conn) {\n prev = iter;\n iter = iter->next;\n }\n if (prev) {\n prev->next = iter->next;\n } else {\n cmap->buckets[i] = iter->next;\n }\n}\n\nstatic atomic_size_t nconns = 0;\nstatic atomic_size_t tconns = 0;\nstatic atomic_size_t rconns = 0;\n\nstatic pthread_mutex_t tls_ready_fds_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic int tls_ready_fds_cap = 0;\nstatic int tls_ready_fds_len = 0;\nstatic int *tls_ready_fds = 0;\n\nstatic void save_tls_fd(int fd) {\n pthread_mutex_lock(&tls_ready_fds_lock);\n if (tls_ready_fds_len == tls_ready_fds_cap) {\n tls_ready_fds_cap *= 2;\n if (tls_ready_fds_cap == 0) {\n tls_ready_fds_cap = 8;\n }\n tls_ready_fds = xrealloc(tls_ready_fds, tls_ready_fds_cap*sizeof(int));\n }\n tls_ready_fds[tls_ready_fds_len++] = fd;\n pthread_mutex_unlock(&tls_ready_fds_lock);\n}\n\nstatic bool del_tls_fd(int fd) {\n bool found = false;\n pthread_mutex_lock(&tls_ready_fds_lock);\n for (int i = 0; i < tls_ready_fds_len; i++) {\n if (tls_ready_fds[i] == fd) {\n tls_ready_fds[i] = tls_ready_fds[tls_ready_fds_len-1];\n tls_ready_fds_len--;\n found = true;\n break;\n }\n }\n pthread_mutex_unlock(&tls_ready_fds_lock);\n return found;\n}\n\nstruct qthreadctx {\n pthread_t th;\n int qfd;\n int index;\n int maxconns;\n int *sfd; // three entries\n bool tcpnodelay;\n bool keepalive;\n bool quickack;\n int queuesize;\n const char *unixsock;\n void *udata;\n bool uring;\n#ifndef NOURING\n struct io_uring ring;\n#endif\n void(*data)(struct net_conn*,const void*,size_t,void*);\n void(*opened)(struct net_conn*,void*);\n void(*closed)(struct net_conn*,void*);\n int nevents;\n event_t *events;\n atomic_int nconns;\n int ntlsconns;\n char *inpkts;\n struct net_conn **qreads;\n struct net_conn 
**qins;\n struct net_conn **qattachs;\n struct net_conn **qouts;\n struct net_conn **qcloses;\n char **qinpkts;\n int *qinpktlens; \n int nqreads;\n int nqins;\n int nqcloses;\n int nqattachs;\n int nqouts;\n int nthreads;\n \n uint64_t stat_cmd_get;\n uint64_t stat_cmd_set;\n uint64_t stat_get_hits;\n uint64_t stat_get_misses;\n\n struct qthreadctx *ctxs;\n struct cmap cmap;\n};\n\nstatic atomic_uint_fast64_t g_stat_cmd_get = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_set = 0;\nstatic atomic_uint_fast64_t g_stat_get_hits = 0;\nstatic atomic_uint_fast64_t g_stat_get_misses = 0;\n\ninline\nstatic void sumstats(struct net_conn *conn, struct qthreadctx *ctx) {\n ctx->stat_cmd_get += conn->stat_cmd_get;\n conn->stat_cmd_get = 0;\n ctx->stat_cmd_set += conn->stat_cmd_set;\n conn->stat_cmd_set = 0;\n ctx->stat_get_hits += conn->stat_get_hits;\n conn->stat_get_hits = 0;\n ctx->stat_get_misses += conn->stat_get_misses;\n conn->stat_get_misses = 0;\n}\n\ninline\nstatic void sumstats_global(struct qthreadctx *ctx) {\n atomic_fetch_add_explicit(&g_stat_cmd_get, ctx->stat_cmd_get, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_get = 0;\n atomic_fetch_add_explicit(&g_stat_cmd_set, ctx->stat_cmd_set, \n __ATOMIC_RELAXED);\n ctx->stat_cmd_set = 0;\n atomic_fetch_add_explicit(&g_stat_get_hits, ctx->stat_get_hits, \n __ATOMIC_RELAXED);\n ctx->stat_get_hits = 0;\n atomic_fetch_add_explicit(&g_stat_get_misses, ctx->stat_get_misses, \n __ATOMIC_RELAXED);\n ctx->stat_get_misses = 0;\n}\n\nuint64_t stat_cmd_get(void) {\n uint64_t x = atomic_load_explicit(&g_stat_cmd_get, __ATOMIC_RELAXED);\n return x;\n}\n\nuint64_t stat_cmd_set(void) {\n return atomic_load_explicit(&g_stat_cmd_set, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_hits(void) {\n return atomic_load_explicit(&g_stat_get_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_misses(void) {\n return atomic_load_explicit(&g_stat_get_misses, __ATOMIC_RELAXED);\n}\n\ninline\nstatic void qreset(struct qthreadctx *ctx) {\n ctx->nqreads = 0;\n 
ctx->nqins = 0;\n ctx->nqcloses = 0;\n ctx->nqouts = 0;\n ctx->nqattachs = 0;\n}\n\ninline\nstatic void qaccept(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nevents; i++) {\n int fd = event_fd(&ctx->events[i]);\n struct net_conn *conn = cmap_get(&ctx->cmap, fd);\n if (!conn) {\n if ((fd == ctx->sfd[0] || fd == ctx->sfd[1] || fd == ctx->sfd[2])) {\n int sfd = fd;\n fd = accept(fd, 0, 0);\n if (fd == -1) {\n continue;\n }\n if (setnonblock(fd) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[0] || sfd == ctx->sfd[2]) {\n if (setkeepalive(fd, ctx->keepalive) == -1) {\n close(fd);\n continue;\n }\n if (settcpnodelay(fd, ctx->tcpnodelay) == -1) {\n close(fd);\n continue;\n }\n if (setquickack(fd, ctx->quickack) == -1) {\n close(fd);\n continue;\n }\n if (sfd == ctx->sfd[2]) {\n save_tls_fd(fd);\n }\n }\n static atomic_uint_fast64_t next_ctx_index = 0;\n int idx = atomic_fetch_add(&next_ctx_index, 1) % ctx->nthreads;\n if (addread(ctx->ctxs[idx].qfd, fd) == -1) {\n if (sfd == ctx->sfd[2]) {\n del_tls_fd(fd);\n }\n close(fd);\n continue;\n }\n continue;\n }\n size_t xnconns = atomic_fetch_add(&nconns, 1);\n if (xnconns >= (size_t)ctx->maxconns) {\n // rejected\n atomic_fetch_add(&rconns, 1);\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n continue;\n }\n bool istls = del_tls_fd(fd);\n conn = conn_new(fd, ctx);\n if (istls) {\n if (!tls_accept(conn->fd, &conn->tls)) {\n atomic_fetch_sub(&nconns, 1);\n close(fd);\n conn_free(conn);\n continue;\n }\n ctx->ntlsconns++;\n }\n atomic_fetch_add_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_add_explicit(&tconns, 1, __ATOMIC_RELEASE);\n cmap_insert(&ctx->cmap, conn);\n ctx->opened(conn, ctx->udata);\n }\n if (conn->bgctx) {\n // BGWORK(2)\n // The connection has been added back to the event loop, but it\n // needs to be attached and restated.\n ctx->qattachs[ctx->nqattachs++] = conn;\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n 
ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void handle_read(ssize_t n, char *pkt, struct net_conn *conn,\n struct qthreadctx *ctx)\n{\n assert(conn->outlen == 0);\n assert(conn->bgctx == 0);\n if (n <= 0) {\n if (n == 0 || errno != EAGAIN) {\n // read failed, close connection\n ctx->qcloses[ctx->nqcloses++] = conn;\n return;\n }\n assert(n == -1 && errno == EAGAIN);\n // even though there's an EAGAIN, still call the user data event\n // handler with an empty packet \n n = 0;\n }\n pkt[n] = '\\0';\n ctx->qins[ctx->nqins] = conn;\n ctx->qinpkts[ctx->nqins] = pkt;\n ctx->qinpktlens[ctx->nqins] = n;\n ctx->nqins++;\n}\n\ninline \nstatic void flush_conn(struct net_conn *conn, size_t written) {\n while (written < conn->outlen) {\n ssize_t n;\n if (conn->tls) {\n n = tls_write(conn->tls, conn->fd, conn->out+written, \n conn->outlen-written);\n } else {\n n = write(conn->fd, conn->out+written, conn->outlen-written);\n }\n if (n == -1) {\n if (errno == EAGAIN) {\n continue;\n }\n conn->closed = true;\n break;\n }\n written += n;\n }\n // either everything was written or the socket is closed\n conn->outlen = 0;\n}\n\ninline\nstatic void qattach(struct qthreadctx *ctx) {\n for (int i = 0; i < ctx->nqattachs; i++) {\n // BGWORK(3)\n // A bgworker has finished, make sure it's added back into the \n // event loop in the correct state.\n struct net_conn *conn = ctx->qattachs[i];\n struct bgworkctx *bgctx = conn->bgctx;\n bgctx->done(conn, bgctx->udata);\n conn->bgctx = 0;\n assert(bgctx);\n xfree(bgctx);\n int ret = delwrite(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n ret = addread(conn->ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n } else {\n ctx->qreads[ctx->nqreads++] = conn;\n }\n }\n}\n\ninline\nstatic void qread(struct qthreadctx *ctx) {\n // Read incoming socket data\n#ifndef NOURING\n if 
(ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // read incoming using uring\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_read(sqe, conn->fd, pkt, PACKETSIZE-1, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n assert(ret == ctx->nqreads);\n for (int i = 0; i < ctx->nqreads; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n = cqe->res;\n if (n < 0) {\n errno = -n;\n n = -1;\n }\n handle_read(n, pkt, conn, ctx);\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // read incoming data using standard syscalls.\n for (int i = 0; i < ctx->nqreads; i++) {\n struct net_conn *conn = ctx->qreads[i];\n char *pkt = ctx->inpkts+(i*PACKETSIZE);\n ssize_t n;\n if (conn->tls) {\n n = tls_read(conn->tls, conn->fd, pkt, PACKETSIZE-1);\n } else {\n n = read(conn->fd, pkt, PACKETSIZE-1);\n }\n handle_read(n, pkt, conn, ctx);\n }\n#ifndef NOURING\n }\n#endif\n}\n\n\ninline\nstatic void qprocess(struct qthreadctx *ctx) {\n // process all new incoming data\n for (int i = 0; i < ctx->nqins; i++) {\n struct net_conn *conn = ctx->qins[i];\n char *p = ctx->qinpkts[i];\n int n = ctx->qinpktlens[i];\n ctx->data(conn, p, n, ctx->udata);\n sumstats(conn, ctx);\n if (conn->bgctx) {\n // BGWORK(1)\n // Connection entered background mode.\n // This means the connection is no longer in the event queue but\n // is still owned by this qthread. 
Once the bgwork is done the \n // connection will be added back to the queue with addwrite.\n } else if (conn->outlen > 0) {\n ctx->qouts[ctx->nqouts++] = conn;\n } else if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n}\n\ninline\nstatic void qprewrite(struct qthreadctx *ctx) {\n (void)ctx;\n // TODO: perform any prewrite operations\n}\n\ninline\nstatic void qwrite(struct qthreadctx *ctx) {\n // Flush all outgoing socket data.\n#ifndef NOURING\n if (ctx->uring && ctx->nqreads >= MINURINGEVENTS && ctx->ntlsconns == 0) {\n // write outgoing using uring\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n struct io_uring_sqe *sqe = io_uring_get_sqe(&ctx->ring);\n io_uring_prep_write(sqe, conn->fd, conn->out, conn->outlen, 0);\n }\n int ret = io_uring_submit(&ctx->ring);\n if (ret < 0) {\n errno = -ret;\n perror(\"# io_uring_submit\");\n abort();\n }\n for (int i = 0; i < ctx->nqouts; i++) {\n struct io_uring_cqe *cqe;\n if (io_uring_wait_cqe(&ctx->ring, &cqe) < 0) {\n perror(\"# io_uring_wait_cqe\");\n abort();\n }\n struct net_conn *conn = ctx->qouts[i];\n ssize_t n = cqe->res;\n if (n == -EAGAIN) {\n n = 0;\n }\n if (n < 0) {\n conn->closed = true;\n } else {\n // Any extra data must be flushed using syscall write.\n flush_conn(conn, n);\n }\n // Either everything was written or the socket is closed\n conn->outlen = 0;\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n io_uring_cqe_seen(&ctx->ring, cqe);\n }\n } else {\n#endif\n // Write data using write syscall\n for (int i = 0; i < ctx->nqouts; i++) {\n struct net_conn *conn = ctx->qouts[i];\n flush_conn(conn, 0);\n if (conn->closed) {\n ctx->qcloses[ctx->nqcloses++] = conn;\n }\n }\n#ifndef NOURING\n }\n#endif\n}\n\ninline\nstatic void qclose(struct qthreadctx *ctx) {\n // Close all sockets that need to be closed\n for (int i = 0; i < ctx->nqcloses; i++) {\n struct net_conn *conn = ctx->qcloses[i];\n ctx->closed(conn, ctx->udata);\n if 
(conn->tls) {\n tls_close(conn->tls, conn->fd);\n ctx->ntlsconns--;\n } else {\n close(conn->fd);\n }\n cmap_delete(&ctx->cmap, conn);\n atomic_fetch_sub_explicit(&nconns, 1, __ATOMIC_RELEASE);\n atomic_fetch_sub_explicit(&ctx->nconns, 1, __ATOMIC_RELEASE);\n conn_free(conn);\n }\n}\n\nstatic void *qthread(void *arg) {\n struct qthreadctx *ctx = arg;\n#ifndef NOURING\n if (ctx->uring) {\n if (io_uring_queue_init(ctx->queuesize, &ctx->ring, 0) < 0) {\n perror(\"# io_uring_queue_init\");\n abort();\n }\n }\n#endif\n // connection map\n memset(&ctx->cmap, 0, sizeof(struct cmap));\n ctx->cmap.nbuckets = 64;\n size_t size = ctx->cmap.nbuckets*sizeof(struct net_conn*);\n ctx->cmap.buckets = xmalloc(size);\n memset(ctx->cmap.buckets, 0, ctx->cmap.nbuckets*sizeof(struct net_conn*));\n\n ctx->events = xmalloc(sizeof(event_t)*ctx->queuesize);\n ctx->qreads = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->inpkts = xmalloc(PACKETSIZE*ctx->queuesize);\n ctx->qins = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qinpkts = xmalloc(sizeof(char*)*ctx->queuesize);\n ctx->qinpktlens = xmalloc(sizeof(int)*ctx->queuesize);\n ctx->qcloses = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qouts = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n ctx->qattachs = xmalloc(sizeof(struct net_conn*)*ctx->queuesize);\n\n while (1) {\n sumstats_global(ctx);\n ctx->nevents = getevents(ctx->qfd, ctx->events, ctx->queuesize, 1, 0);\n if (ctx->nevents <= 0) {\n if (ctx->nevents == -1 && errno != EINTR) {\n perror(\"# getevents\");\n abort();\n }\n continue;\n }\n // reset, accept, attach, read, process, prewrite, write, close\n qreset(ctx); // reset the step queues\n qaccept(ctx); // accept incoming connections\n qattach(ctx); // attach bg workers. 
uncommon\n qread(ctx); // read from sockets\n qprocess(ctx); // process new socket data\n qprewrite(ctx); // perform any prewrite operations, such as fsync\n qwrite(ctx); // write to sockets\n qclose(ctx); // close any sockets that need closing\n }\n return 0;\n}\n\nstatic int listen_tcp(const char *host, const char *port, bool reuseport, \n int backlog)\n{\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return 0;\n }\n int ret;\n host = host ? host : \"127.0.0.1\";\n port = port ? port : \"0\";\n struct addrinfo hints = { 0 }, *addrs;\n hints.ai_family = AF_UNSPEC; \n hints.ai_socktype = SOCK_STREAM;\n hints.ai_protocol = IPPROTO_TCP;\n ret = getaddrinfo(host, port, &hints, &addrs);\n if (ret != 0) {\n fprintf(stderr, \"# getaddrinfo: %s: %s:%s\", gai_strerror(ret), host,\n port);\n abort();\n }\n struct addrinfo *ainfo = addrs;\n while (ainfo->ai_family != PF_INET) {\n ainfo = ainfo->ai_next;\n }\n assert(ainfo);\n int fd = socket(ainfo->ai_family, ainfo->ai_socktype, ainfo->ai_protocol);\n if (fd == -1) {\n perror(\"# socket(tcp)\");\n abort();\n }\n if (reuseport) {\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, \n sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseport)\");\n abort();\n }\n }\n ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &(int){1},sizeof(int));\n if (ret == -1) {\n perror(\"# setsockopt(reuseaddr)\");\n abort();\n }\n ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n ret = bind(fd, ainfo->ai_addr, ainfo->ai_addrlen);\n if (ret == -1) {\n fprintf(stderr, \"# bind(tcp): %s:%s\", host, port);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(tcp): %s:%s\", host, port);\n abort();\n }\n freeaddrinfo(addrs);\n return fd;\n}\n\nstatic int listen_unixsock(const char *unixsock, int backlog) {\n if (!unixsock || !*unixsock) {\n return 0;\n }\n struct sockaddr_un unaddr;\n int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n if (fd == -1) {\n 
perror(\"# socket(unix)\");\n abort();\n }\n memset(&unaddr, 0, sizeof(struct sockaddr_un));\n unaddr.sun_family = AF_UNIX;\n strncpy(unaddr.sun_path, unixsock, sizeof(unaddr.sun_path) - 1);\n int ret = setnonblock(fd);\n if (ret == -1) {\n perror(\"# setnonblock\");\n abort();\n }\n unlink(unixsock);\n ret = bind(fd, (struct sockaddr *)&unaddr, sizeof(struct sockaddr_un));\n if (ret == -1) {\n fprintf(stderr, \"# bind(unix): %s\", unixsock);\n abort();\n }\n ret = listen(fd, backlog);\n if (ret == -1) {\n fprintf(stderr, \"# listen(unix): %s\", unixsock);\n abort();\n }\n return fd;\n}\n\nstatic atomic_uintptr_t all_ctxs = 0;\n\n// current connections\nsize_t net_nconns(void) {\n return atomic_load_explicit(&nconns, __ATOMIC_ACQUIRE);\n}\n\n// total connections ever\nsize_t net_tconns(void) {\n return atomic_load_explicit(&tconns, __ATOMIC_ACQUIRE);\n}\n\n// total rejected connections ever\nsize_t net_rconns(void) {\n return atomic_load_explicit(&rconns, __ATOMIC_ACQUIRE);\n}\n\nstatic void warmupunix(const char *unixsock, int nsocks) {\n if (!unixsock || !*unixsock) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n socks[i] = socket(AF_UNIX, SOCK_STREAM, 0);\n if (socks[i] == -1) {\n socks[i] = 0;\n continue;\n }\n struct sockaddr_un addr;\n memset(&addr, 0, sizeof(struct sockaddr_un));\n addr.sun_family = AF_UNIX;\n strncpy(addr.sun_path, unixsock, sizeof(addr.sun_path) - 1);\n if (connect(socks[i], (struct sockaddr *)&addr, \n sizeof(struct sockaddr_un)) == -1)\n {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup unix socket (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\n\nstatic void warmuptcp(const char *host, const char *port, int nsocks) {\n if (!port || !*port || strcmp(port, \"0\") == 0) {\n return;\n }\n int *socks = xmalloc(nsocks*sizeof(int));\n memset(socks, 0, nsocks*sizeof(int));\n for (int i = 0; i < nsocks; i++) {\n struct addrinfo hints, *res;\n memset(&hints, 0, sizeof(hints));\n hints.ai_family = AF_INET;\n hints.ai_socktype = SOCK_STREAM;\n int err = getaddrinfo(host, port, &hints, &res);\n if (err != 0) {\n continue;\n }\n socks[i] = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n if (socks[i] == -1) {\n freeaddrinfo(res);\n continue;\n }\n int ret = connect(socks[i], res->ai_addr, res->ai_addrlen);\n freeaddrinfo(res);\n if (ret == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n ssize_t n = write(socks[i], \"+PING\\r\\n\", 7);\n if (n == -1) {\n close(socks[i]);\n socks[i] = 0;\n continue;\n }\n }\n int x = 0;\n for (int i = 0; i < nsocks; i++) {\n if (socks[i] > 0) {\n x++;\n close(socks[i]);\n }\n }\n if (verb > 1) {\n printf(\". 
Warmup tcp (%d/%d)\\n\", x, nsocks);\n }\n xfree(socks);\n}\n\nstatic void *thwarmup(void *arg) {\n // Perform a warmup of the epoll queues and listeners by making a quick\n // connection to each.\n struct net_opts *opts = arg;\n warmupunix(opts->unixsock, opts->nthreads*2);\n warmuptcp(opts->host, opts->port, opts->nthreads*2);\n return 0;\n}\n\nvoid net_main(struct net_opts *opts) {\n (void)delread;\n int sfd[3] = {\n listen_tcp(opts->host, opts->port, opts->reuseport, opts->backlog),\n listen_unixsock(opts->unixsock, opts->backlog),\n listen_tcp(opts->host, opts->tlsport, opts->reuseport, opts->backlog),\n };\n if (!sfd[0] && !sfd[1] && !sfd[2]) {\n printf(\"# No listeners provided\\n\");\n abort();\n }\n opts->listening(opts->udata);\n struct qthreadctx *ctxs = xmalloc(sizeof(struct qthreadctx)*opts->nthreads);\n memset(ctxs, 0, sizeof(struct qthreadctx)*opts->nthreads);\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n ctx->nthreads = opts->nthreads;\n ctx->tcpnodelay = opts->tcpnodelay;\n ctx->keepalive = opts->keepalive;\n ctx->quickack = opts->quickack;\n ctx->uring = !opts->nouring;\n ctx->ctxs = ctxs;\n ctx->index = i;\n ctx->maxconns = opts->maxconns;\n ctx->sfd = sfd;\n ctx->data = opts->data;\n ctx->udata = opts->udata;\n ctx->opened = opts->opened;\n ctx->closed = opts->closed;\n ctx->qfd = evqueue();\n if (ctx->qfd == -1) {\n perror(\"# evqueue\");\n abort();\n }\n atomic_init(&ctx->nconns, 0);\n for (int j = 0; j < 3; j++) {\n if (sfd[j]) {\n int ret = addread(ctx->qfd, sfd[j]);\n if (ret == -1) {\n perror(\"# addread\");\n abort();\n }\n }\n }\n ctx->unixsock = opts->unixsock;\n ctx->queuesize = opts->queuesize;\n }\n atomic_store(&all_ctxs, (uintptr_t)(void*)ctxs);\n opts->ready(opts->udata);\n if (!opts->nowarmup) {\n pthread_t th;\n int ret = pthread_create(&th, 0, thwarmup, opts);\n if (ret != -1) {\n pthread_detach(th);\n }\n }\n for (int i = 0; i < opts->nthreads; i++) {\n struct qthreadctx *ctx = &ctxs[i];\n 
if (i == opts->nthreads-1) {\n qthread(ctx);\n } else {\n int ret = pthread_create(&ctx->th, 0, qthread, ctx);\n if (ret == -1) {\n perror(\"# pthread_create\");\n abort();\n }\n }\n }\n}\n\nstatic void *bgwork(void *arg) {\n struct bgworkctx *bgctx = arg;\n bgctx->work(bgctx->udata);\n // We are not in the same thread context as the event loop that owns this\n // connection. Adding the writer to the queue will allow for the loop\n // thread to gracefully continue the operation and then call the 'done'\n // callback.\n int ret = addwrite(bgctx->conn->ctx->qfd, bgctx->conn->fd);\n assert(ret == 0); (void)ret;\n return 0;\n}\n\n// net_conn_bgwork processes work in a background thread.\n// When work is finished, the done function is called.\n// It's not safe to use the conn type in the work function.\nbool net_conn_bgwork(struct net_conn *conn, void (*work)(void *udata), \n void (*done)(struct net_conn *conn, void *udata), void *udata)\n{\n if (conn->bgctx || conn->closed) {\n return false;\n }\n struct qthreadctx *ctx = conn->ctx;\n int ret = delread(ctx->qfd, conn->fd);\n assert(ret == 0); (void)ret;\n conn->bgctx = xmalloc(sizeof(struct bgworkctx));\n memset(conn->bgctx, 0, sizeof(struct bgworkctx));\n conn->bgctx->conn = conn;\n conn->bgctx->done = done;\n conn->bgctx->work = work;\n conn->bgctx->udata = udata;\n pthread_t th;\n if (pthread_create(&th, 0, bgwork, conn->bgctx) == -1) {\n // Failed to create thread. 
Revert and return false.\n ret = addread(ctx->qfd, conn->fd);\n assert(ret == 0);\n xfree(conn->bgctx);\n conn->bgctx = 0;\n return false;\n } else {\n pthread_detach(th);\n }\n return true;\n}\n\nbool net_conn_bgworking(struct net_conn *conn) {\n return conn->bgctx != 0;\n}\n\nvoid net_stat_cmd_get_incr(struct net_conn *conn) {\n conn->stat_cmd_get++;\n}\n\nvoid net_stat_cmd_set_incr(struct net_conn *conn) {\n conn->stat_cmd_set++;\n}\n\nvoid net_stat_get_hits_incr(struct net_conn *conn) {\n conn->stat_get_hits++;\n}\n\nvoid net_stat_get_misses_incr(struct net_conn *conn) {\n conn->stat_get_misses++;\n}\n\nbool net_conn_istls(struct net_conn *conn) {\n return conn->tls != 0;\n}\n"], ["/pogocache/src/main.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit main.c is the main entry point for the Pogocache program.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"net.h\"\n#include \"conn.h\"\n#include \"sys.h\"\n#include \"cmds.h\"\n#include \"save.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"tls.h\"\n#include \"pogocache.h\"\n#include \"gitinfo.h\"\n#include \"uring.h\"\n\n// default user flags\nint nthreads = 0; // number of client threads\nchar *port = \"9401\"; // default tcp port (non-tls)\nchar *host = \"127.0.0.1\"; // default hostname or ip address\nchar *persist = \"\"; // file to load and save data to\nchar *unixsock = \"\"; // use a unix socket\nchar *reuseport = \"no\"; // reuse tcp port for other programs\nchar *tcpnodelay = \"yes\"; // disable nagle's algorithm\nchar *quickack = \"no\"; // enable 
quick acks\nchar *usecas = \"no\"; // enable compare and store\nchar *keepalive = \"yes\"; // socket keepalive setting\nint backlog = 1024; // network socket accept backlog\nint queuesize = 128; // event queue size\nchar *maxmemory = \"80%\"; // Maximum memory allowed - 80% total system\nchar *evict = \"yes\"; // evict keys when maxmemory reached\nint loadfactor = 75; // hashmap load factor\nchar *keysixpack = \"yes\"; // use sixpack compression on keys\nchar *trackallocs = \"no\"; // track allocations (for debugging)\nchar *auth = \"\"; // auth token or pa\nchar *tlsport = \"\"; // enable tls over tcp port\nchar *tlscertfile = \"\"; // tls cert file\nchar *tlskeyfile = \"\"; // tls key file\nchar *tlscacertfile = \"\"; // tls ca cert file\nchar *uring = \"yes\"; // use uring (linux only)\nint maxconns = 1024; // maximum number of sockets\nchar *noticker = \"no\";\nchar *warmup = \"yes\";\n\n// Global variables calculated in main().\n// These should never change during the lifetime of the process.\n// Other source files must use the \"extern const\" specifier.\nchar *version;\nchar *githash;\nuint64_t seed;\nsize_t sysmem;\nsize_t memlimit;\nint verb; // verbosity, 0=no, 1=verbose, 2=very, 3=extremely\nbool usesixpack;\nint useallocator;\nbool usetrackallocs;\nbool useevict;\nint nshards;\nbool usetls; // use tls security (pemfile required);\nbool useauth; // use auth password\nbool usecolor; // allow color in terminal\nchar *useid; // instance id (unique to every process run)\nint64_t procstart; // proc start boot time, for uptime stat\n\n// Global atomic variable. 
These are safe to read and modify by other source\n// files, as long as those sources use \"atomic_\" methods.\natomic_int shutdownreq; // shutdown request counter\natomic_int_fast64_t flush_delay; // delay in seconds to next async flushall\natomic_bool sweep; // mark for async sweep, asap\natomic_bool registered; // registration is active\natomic_bool lowmem; // system is in low memory mode.\n\nstruct pogocache *cache;\n\n// min max robinhood load factor (75% performs pretty well)\n#define MINLOADFACTOR_RH 55\n#define MAXLOADFACTOR_RH 95\n\nstatic void ready(void *udata) {\n (void)udata;\n printf(\"* Ready to accept connections\\n\");\n}\n\n#define noopt \"%s\"\n\n#define HELP(format, ...) \\\n fprintf(file, format, ##__VA_ARGS__)\n\n#define HOPT(opt, desc, format, ...) \\\n fprintf(file, \" \"); \\\n fprintf(file, \"%-22s \", opt); \\\n fprintf(file, \"%-30s \", desc); \\\n if (strcmp(format, noopt) != 0) { \\\n fprintf(file, \"(default: \" format \")\", ##__VA_ARGS__); \\\n } \\\n fprintf(file, \"\\n\");\n\nstatic int calc_nshards(int nprocs) {\n switch (nprocs) {\n case 1: return 64;\n case 2: return 128;\n case 3: return 256;\n case 4: return 512;\n case 5: return 1024;\n case 6: return 2048;\n default: return 4096;\n }\n}\n\nstatic void showhelp(FILE *file) {\n int nprocs = sys_nprocs();\n int nshards = calc_nshards(nprocs);\n\n HELP(\"Usage: %s [options]\\n\", \"pogocache\");\n HELP(\"\\n\");\n\n HELP(\"Basic options:\\n\");\n HOPT(\"-h hostname\", \"listening host\", \"%s\", host);\n HOPT(\"-p port\", \"listening port\", \"%s\", port);\n HOPT(\"-s socket\", \"unix socket file\", \"%s\", *unixsock?unixsock:\"none\");\n\n HOPT(\"-v,-vv,-vvv\", \"verbose logging level\", noopt, \"\");\n HELP(\"\\n\");\n \n HELP(\"Additional options:\\n\");\n HOPT(\"--threads count\", \"number of threads\", \"%d\", nprocs);\n HOPT(\"--maxmemory value\", \"set max memory usage\", \"%s\", maxmemory);\n HOPT(\"--evict yes/no\", \"evict keys at maxmemory\", \"%s\", evict);\n 
HOPT(\"--persist path\", \"persistence file\", \"%s\", *persist?persist:\"none\");\n HOPT(\"--maxconns conns\", \"maximum connections\", \"%d\", maxconns);\n HELP(\"\\n\");\n \n HELP(\"Security options:\\n\");\n HOPT(\"--auth passwd\", \"auth token or password\", \"%s\", *auth?auth:\"none\");\n#ifndef NOOPENSSL\n HOPT(\"--tlsport port\", \"enable tls on port\", \"%s\", \"none\");\n HOPT(\"--tlscert certfile\", \"tls cert file\", \"%s\", \"none\");\n HOPT(\"--tlskey keyfile\", \"tls key file\", \"%s\", \"none\");\n HOPT(\"--tlscacert cacertfile\", \"tls ca-cert file\", \"%s\", \"none\");\n#endif\n HELP(\"\\n\");\n\n HELP(\"Advanced options:\\n\");\n HOPT(\"--shards count\", \"number of shards\", \"%d\", nshards);\n HOPT(\"--backlog count\", \"accept backlog\", \"%d\", backlog);\n HOPT(\"--queuesize count\", \"event queuesize size\", \"%d\", queuesize);\n HOPT(\"--reuseport yes/no\", \"reuseport for tcp\", \"%s\", reuseport);\n HOPT(\"--tcpnodelay yes/no\", \"disable nagle's algo\", \"%s\", tcpnodelay);\n HOPT(\"--quickack yes/no\", \"use quickack (linux)\", \"%s\", quickack);\n HOPT(\"--uring yes/no\", \"use uring (linux)\", \"%s\", uring);\n HOPT(\"--loadfactor percent\", \"hashmap load factor\", \"%d\", loadfactor);\n HOPT(\"--keysixpack yes/no\", \"sixpack compress keys\", \"%s\", keysixpack);\n HOPT(\"--cas yes/no\", \"use compare and store\", \"%s\", usecas);\n HELP(\"\\n\");\n}\n\nstatic void showversion(FILE *file) {\n#ifdef CCSANI\n fprintf(file, \"pogocache %s (CCSANI)\\n\", version);\n#else\n fprintf(file, \"pogocache %s\\n\", version);\n#endif\n}\n\nstatic size_t calc_memlimit(char *maxmemory) {\n if (strcmp(maxmemory, \"unlimited\") == 0) {\n return SIZE_MAX;\n }\n char *oval = maxmemory;\n while (isspace(*maxmemory)) {\n maxmemory++;\n }\n char *end;\n errno = 0;\n double mem = strtod(maxmemory, &end);\n if (errno || !(mem > 0) || !isfinite(mem)) {\n goto fail;\n }\n while (isspace(*end)) {\n end++;\n }\n #define exteq(c) \\\n (tolower(end[0])==c&& 
(!end[1]||(tolower(end[1])=='b'&&!end[2])))\n\n if (strcmp(end, \"\") == 0) {\n return mem;\n } else if (strcmp(end, \"%\") == 0) {\n return (((double)mem)/100.0) * sysmem;\n } else if (exteq('k')) {\n return mem*1024.0;\n } else if (exteq('m')) {\n return mem*1024.0*1024.0;\n } else if (exteq('g')) {\n return mem*1024.0*1024.0*1024.0;\n } else if (exteq('t')) {\n return mem*1024.0*1024.0*1024.0*1024.0;\n }\nfail:\n fprintf(stderr, \"# Invalid maxmemory '%s'\\n\", oval);\n showhelp(stderr);\n exit(1);\n}\n\nstatic size_t setmaxrlimit(void) {\n size_t maxconns = 0;\n struct rlimit rl;\n if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {\n maxconns = rl.rlim_max;\n rl.rlim_cur = rl.rlim_max;\n rl.rlim_max = rl.rlim_max;\n if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {\n perror(\"# setrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n } else {\n perror(\"# getrlimit(RLIMIT_NOFILE)\");\n abort();\n }\n return maxconns;\n}\n\nstatic void evicted(int shard, int reason, int64_t time, const void *key,\n size_t keylen, const void *value, size_t valuelen, int64_t expires,\n uint32_t flags, uint64_t cas, void *udata)\n{\n (void)value, (void)valuelen, (void)expires, (void)udata;\n return;\n printf(\". 
evicted shard=%d, reason=%d, time=%\" PRIi64 \", key='%.*s'\"\n \", flags=%\" PRIu32 \", cas=%\" PRIu64 \"\\n\",\n shard, reason, time, (int)keylen, (char*)key, flags, cas);\n}\n\n#define BEGIN_FLAGS() \\\n if (0) {\n#define BFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option %s missing value\\n\", opt); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n }\n#define TFLAG(opt, op) \\\n } else if (strcmp(argv[i], opt) == 0) { \\\n if (!dryrun) { \\\n op; \\\n }\n#define AFLAG(name, op) \\\n } else if (strcmp(argv[i], \"--\" name) == 0) { \\\n i++; \\\n if (i == argc) { \\\n fprintf(stderr, \"# Option --%s missing value\\n\", name); \\\n exit(1); \\\n } \\\n if (!dryrun) { \\\n char *flag = argv[i]; op; \\\n } \\\n } else if (strstr(argv[i], \"--\" name \"=\") == argv[i]) { \\\n if (!dryrun) { \\\n char *flag = argv[i]+strlen(name)+3; op; \\\n }\n#define END_FLAGS() \\\n } else { \\\n fprintf(stderr, \"# Unknown program option %s\\n\", argv[i]); \\\n exit(1); \\\n }\n\n#define INVALID_FLAG(name, value) \\\n fprintf(stderr, \"# Option --%s is invalid\\n\", name); \\\n exit(1);\n\nstatic atomic_bool loaded = false;\n\nvoid sigterm(int sig) {\n if (sig == SIGINT || sig == SIGTERM) {\n if (!atomic_load(&loaded) || !*persist) {\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n if (*persist) {\n printf(\"* Saving data to %s, please wait...\\n\", persist);\n int ret = save(persist, true);\n if (ret != 0) {\n perror(\"# Save failed\");\n _Exit(1);\n }\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n\n int count = atomic_fetch_add(&shutdownreq, 1);\n if (count > 0 && sig == SIGINT) {\n printf(\"# User forced shutdown\\n\");\n printf(\"# Pogocache exiting now\\n\");\n _Exit(0);\n }\n }\n}\n\nstatic void tick(void) {\n if (!atomic_load_explicit(&loaded, __ATOMIC_ACQUIRE)) {\n return;\n }\n // Memory usage check\n if (memlimit < SIZE_MAX) {\n struct sys_meminfo 
meminfo;\n sys_getmeminfo(&meminfo);\n size_t memusage = meminfo.rss;\n if (!lowmem) {\n if (memusage > memlimit) {\n atomic_store(&lowmem, true);\n if (verb > 0) {\n printf(\"# Low memory mode on\\n\");\n }\n }\n } else {\n if (memusage < memlimit) {\n atomic_store(&lowmem, false);\n if (verb > 0) {\n printf(\"# Low memory mode off\\n\");\n }\n }\n }\n }\n\n // Print allocations to terminal.\n if (usetrackallocs) {\n printf(\". keys=%zu, allocs=%zu, conns=%zu\\n\",\n pogocache_count(cache, 0), xallocs(), net_nconns());\n }\n\n}\n\nstatic void *ticker(void *arg) {\n (void)arg;\n while (1) {\n tick();\n sleep(1);\n }\n return 0;\n}\n\nstatic void listening(void *udata) {\n (void)udata;\n printf(\"* Network listener established\\n\");\n if (*persist) {\n if (!cleanwork(persist)) {\n // An error message has already been printed\n _Exit(0);\n }\n if (access(persist, F_OK) == 0) {\n printf(\"* Loading data from %s, please wait...\\n\", persist);\n struct load_stats stats;\n int64_t start = sys_now();\n int ret = load(persist, true, &stats);\n if (ret != 0) {\n perror(\"# Load failed\");\n _Exit(1);\n }\n double elapsed = (sys_now()-start)/1e9;\n printf(\"* Loaded %zu entries (%zu expired) (%.3f MB in %.3f secs) \"\n \"(%.0f entries/sec, %.0f MB/sec) \\n\", \n stats.ninserted, stats.nexpired,\n stats.csize/1024.0/1024.0, elapsed, \n (stats.ninserted+stats.nexpired)/elapsed, \n stats.csize/1024.0/1024.0/elapsed);\n }\n }\n atomic_store(&loaded, true);\n}\n\nstatic void yield(void *udata) {\n (void)udata;\n sched_yield();\n}\n\nint main(int argc, char *argv[]) {\n procstart = sys_now();\n\n // Intercept signals\n signal(SIGPIPE, SIG_IGN);\n signal(SIGINT, sigterm);\n signal(SIGTERM, sigterm);\n\n // Line buffer logging so pipes will stream.\n setvbuf(stdout, 0, _IOLBF, 0);\n setvbuf(stderr, 0, _IOLBF, 0);\n char guseid[17];\n memset(guseid, 0, 17);\n useid = guseid;\n sys_genuseid(useid); \n const char *maxmemorymb = 0;\n seed = sys_seed();\n verb = 0;\n usetls = false;\n 
useauth = false;\n lowmem = false;\n version = GITVERS;\n githash = GITHASH;\n\n \n\n\n if (uring_available()) {\n uring = \"yes\";\n } else {\n uring = \"no\";\n }\n\n atomic_init(&shutdownreq, 0);\n atomic_init(&flush_delay, 0);\n atomic_init(&sweep, false);\n atomic_init(®istered, false);\n\n // Parse program flags\n for (int ii = 0; ii < 2; ii++) {\n bool dryrun = ii == 0;\n for (int i = 1; i < argc; i++) {\n if (strcmp(argv[i], \"--help\") == 0) {\n showhelp(stdout);\n exit(0);\n }\n if (strcmp(argv[i], \"--version\") == 0) {\n showversion(stdout);\n exit(0);\n }\n BEGIN_FLAGS()\n BFLAG(\"-p\", port = flag)\n BFLAG(\"-h\", host = flag)\n BFLAG(\"-s\", unixsock = flag)\n TFLAG(\"-v\", verb = 1)\n TFLAG(\"-vv\", verb = 2)\n TFLAG(\"-vvv\", verb = 3)\n AFLAG(\"port\", port = flag)\n AFLAG(\"threads\", nthreads = atoi(flag))\n AFLAG(\"shards\", nshards = atoi(flag))\n AFLAG(\"backlog\", backlog = atoi(flag))\n AFLAG(\"queuesize\", queuesize = atoi(flag))\n AFLAG(\"maxmemory\", maxmemory = flag)\n AFLAG(\"evict\", evict = flag)\n AFLAG(\"reuseport\", reuseport = flag)\n AFLAG(\"uring\", uring = flag)\n AFLAG(\"tcpnodelay\", tcpnodelay = flag)\n AFLAG(\"keepalive\", keepalive = flag)\n AFLAG(\"quickack\", quickack = flag)\n AFLAG(\"trackallocs\", trackallocs = flag)\n AFLAG(\"cas\", usecas = flag)\n AFLAG(\"maxconns\", maxconns = atoi(flag))\n AFLAG(\"loadfactor\", loadfactor = atoi(flag))\n AFLAG(\"sixpack\", keysixpack = flag)\n AFLAG(\"seed\", seed = strtoull(flag, 0, 10))\n AFLAG(\"auth\", auth = flag)\n AFLAG(\"persist\", persist = flag)\n AFLAG(\"noticker\", noticker = flag)\n AFLAG(\"warmup\", warmup = flag)\n#ifndef NOOPENSSL\n // TLS flags\n AFLAG(\"tlsport\", tlsport = flag)\n AFLAG(\"tlscert\", tlscertfile = flag)\n AFLAG(\"tlscacert\", tlscacertfile = flag)\n AFLAG(\"tlskey\", tlskeyfile = flag)\n#endif\n // Hidden or alternative flags\n BFLAG(\"-t\", nthreads = atoi(flag)) // --threads=\n BFLAG(\"-m\", maxmemorymb = flag) // --maxmemory=M\n 
TFLAG(\"-M\", evict = \"no\") // --evict=no\n END_FLAGS()\n }\n }\n\n usecolor = isatty(fileno(stdout));\n\n if (strcmp(evict, \"yes\") == 0) {\n useevict = true;\n } else if (strcmp(evict, \"no\") == 0) {\n useevict = false;\n } else {\n INVALID_FLAG(\"evict\", evict);\n }\n\n bool usereuseport;\n if (strcmp(reuseport, \"yes\") == 0) {\n usereuseport = true;\n } else if (strcmp(reuseport, \"no\") == 0) {\n usereuseport = false;\n } else {\n INVALID_FLAG(\"reuseport\", reuseport);\n }\n\n if (strcmp(trackallocs, \"yes\") == 0) {\n usetrackallocs = true;\n } else if (strcmp(trackallocs, \"no\") == 0) {\n usetrackallocs = false;\n } else {\n INVALID_FLAG(\"trackallocs\", trackallocs);\n }\n\n bool usetcpnodelay;\n if (strcmp(tcpnodelay, \"yes\") == 0) {\n usetcpnodelay = true;\n } else if (strcmp(tcpnodelay, \"no\") == 0) {\n usetcpnodelay = false;\n } else {\n INVALID_FLAG(\"tcpnodelay\", tcpnodelay);\n }\n\n bool usekeepalive;\n if (strcmp(keepalive, \"yes\") == 0) {\n usekeepalive = true;\n } else if (strcmp(keepalive, \"no\") == 0) {\n usekeepalive = false;\n } else {\n INVALID_FLAG(\"keepalive\", keepalive);\n }\n\n\n bool usecasflag;\n if (strcmp(usecas, \"yes\") == 0) {\n usecasflag = true;\n } else if (strcmp(usecas, \"no\") == 0) {\n usecasflag = false;\n } else {\n INVALID_FLAG(\"usecas\", usecas);\n }\n\n if (maxconns <= 0) {\n maxconns = 1024;\n }\n\n\n#ifndef __linux__\n bool useuring = false;\n#else\n bool useuring;\n if (strcmp(uring, \"yes\") == 0) {\n useuring = true;\n } else if (strcmp(uring, \"no\") == 0) {\n useuring = false;\n } else {\n INVALID_FLAG(\"uring\", uring);\n }\n if (useuring) {\n if (!uring_available()) {\n useuring = false;\n }\n }\n#endif\n\n#ifndef __linux__\n quickack = \"no\";\n#endif\n bool usequickack;\n if (strcmp(quickack, \"yes\") == 0) {\n usequickack = true;\n } else if (strcmp(quickack, \"no\") == 0) {\n usequickack = false;\n } else {\n INVALID_FLAG(\"quickack\", quickack);\n }\n\n if (strcmp(keysixpack, \"yes\") == 0) 
{\n usesixpack = true;\n } else if (strcmp(keysixpack, \"no\") == 0) {\n usesixpack = false;\n } else {\n INVALID_FLAG(\"sixpack\", keysixpack);\n }\n\n // Threads\n if (nthreads <= 0) {\n nthreads = sys_nprocs();\n } else if (nthreads > 4096) {\n nthreads = 4096; \n }\n\n if (nshards == 0) {\n nshards = calc_nshards(nthreads);\n }\n if (nshards <= 0 || nshards > 65536) {\n nshards = 65536;\n }\n\n if (loadfactor < MINLOADFACTOR_RH) {\n loadfactor = MINLOADFACTOR_RH;\n printf(\"# loadfactor minumum set to %d\\n\", MINLOADFACTOR_RH);\n } else if (loadfactor > MAXLOADFACTOR_RH) {\n loadfactor = MAXLOADFACTOR_RH;\n printf(\"# loadfactor maximum set to %d\\n\", MAXLOADFACTOR_RH);\n }\n\n if (queuesize < 1) {\n queuesize = 1;\n printf(\"# queuesize adjusted to 1\\n\");\n } else if (queuesize > 4096) {\n queuesize = 4096;\n printf(\"# queuesize adjusted to 4096\\n\");\n }\n\n if (maxmemorymb) {\n size_t sz = strlen(maxmemorymb)+2;\n char *str = xmalloc(sz);\n snprintf(str, sz, \"%sM\", maxmemorymb);\n maxmemory = str;\n }\n\n if (!*port || strcmp(port, \"0\") == 0) {\n port = \"\";\n }\n\n if (!*tlsport || strcmp(tlsport, \"0\") == 0) {\n usetls = false;\n tlsport = \"\";\n } else {\n usetls = true;\n tls_init();\n }\n\n if (*auth) {\n useauth = true;\n }\n setmaxrlimit();\n sysmem = sys_memory();\n memlimit = calc_memlimit(maxmemory);\n\n if (memlimit == SIZE_MAX) {\n evict = \"no\";\n useevict = false;\n }\n\n struct pogocache_opts opts = {\n .yield = yield,\n .seed = seed,\n .malloc = xmalloc,\n .free = xfree,\n .nshards = nshards,\n .loadfactor = loadfactor,\n .usecas = usecasflag,\n .evicted = evicted,\n .allowshrink = true,\n .usethreadbatch = true,\n };\n // opts.yield = 0;\n\n cache = pogocache_new(&opts);\n if (!cache) {\n perror(\"pogocache_new\");\n abort();\n }\n\n // Print the program details\n printf(\"* Pogocache (pid: %d, arch: %s%s, version: %s, git: %s)\\n\",\n getpid(), sys_arch(), sizeof(uintptr_t)==4?\", mode: 32-bit\":\"\", version,\n githash);\n 
char buf0[64], buf1[64];\n char buf2[64];\n if (memlimit < SIZE_MAX) {\n snprintf(buf2, sizeof(buf2), \"%.0f%%/%s\", (double)memlimit/sysmem*100.0,\n memstr(memlimit, buf1));\n } else {\n strcpy(buf2, \"unlimited\");\n }\n printf(\"* Memory (system: %s, max: %s, evict: %s)\\n\", memstr(sysmem, buf0),\n buf2, evict);\n printf(\"* Features (verbosity: %s, sixpack: %s, cas: %s, persist: %s, \"\n \"uring: %s)\\n\",\n verb==0?\"normal\":verb==1?\"verbose\":verb==2?\"very\":\"extremely\",\n keysixpack, usecas, *persist?persist:\"none\", useuring?\"yes\":\"no\");\n char tcp_addr[256];\n snprintf(tcp_addr, sizeof(tcp_addr), \"%s:%s\", host, port);\n printf(\"* Network (port: %s, unixsocket: %s, backlog: %d, reuseport: %s, \"\n \"maxconns: %d)\\n\", *port?port:\"none\", *unixsock?unixsock:\"none\",\n backlog, reuseport, maxconns);\n printf(\"* Socket (tcpnodelay: %s, keepalive: %s, quickack: %s)\\n\",\n tcpnodelay, keepalive, quickack);\n printf(\"* Threads (threads: %d, queuesize: %d)\\n\", nthreads, queuesize);\n printf(\"* Shards (shards: %d, loadfactor: %d%%)\\n\", nshards, loadfactor);\n printf(\"* Security (auth: %s, tlsport: %s)\\n\", \n strlen(auth)>0?\"enabled\":\"disabled\", *tlsport?tlsport:\"none\");\n if (strcmp(noticker,\"yes\") == 0) {\n printf(\"# NO TICKER\\n\");\n } else {\n pthread_t th;\n int ret = pthread_create(&th, 0, ticker, 0);\n if (ret == -1) {\n perror(\"# pthread_create(ticker)\");\n exit(1);\n }\n }\n#ifdef DATASETOK\n printf(\"# DATASETOK\\n\");\n#endif\n#ifdef CMDGETNIL\n printf(\"# CMDGETNIL\\n\");\n#endif\n#ifdef CMDSETOK\n printf(\"# CMDSETOK\\n\");\n#endif\n#ifdef ENABLELOADREAD\n printf(\"# ENABLELOADREAD\\n\");\n#endif\n struct net_opts nopts = {\n .host = host,\n .port = port,\n .tlsport = tlsport,\n .unixsock = unixsock,\n .reuseport = usereuseport,\n .tcpnodelay = usetcpnodelay,\n .keepalive = usekeepalive,\n .quickack = usequickack,\n .backlog = backlog,\n .queuesize = queuesize,\n .nthreads = nthreads,\n .nowarmup = strcmp(warmup, 
\"no\") == 0,\n .nouring = !useuring,\n .listening = listening,\n .ready = ready,\n .data = evdata,\n .opened = evopened,\n .closed = evclosed,\n .maxconns = maxconns,\n };\n net_main(&nopts);\n return 0;\n}\n"], ["/pogocache/src/util.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit util.c provides various utilities and convenience functions.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n\n// Performs a case-insenstive equality test between the byte slice 'data' and\n// a c-string. It's expected that c-string is already lowercase and \n// null-terminated. The data does not need to be null-terminated.\nbool argeq_bytes(const void *data, size_t datalen, const char *cstr) {\n const char *p = data;\n const char *e = p+datalen;\n bool eq = true;\n while (eq && p < e && *cstr) {\n eq = tolower(*p) == *cstr;\n p++;\n cstr++;\n }\n return eq && *cstr == '\\0' && p == e;\n}\n\nbool argeq(struct args *args, int idx, const char *cstr) {\n return argeq_bytes(args->bufs[idx].data, args->bufs[idx].len, cstr);\n}\n\n// Safely adds two int64_t values and with clamping on overflow.\nint64_t int64_add_clamp(int64_t a, int64_t b) {\n if (!((a ^ b) < 0)) { // Opposite signs can't overflow\n if (a > 0) {\n if (b > INT64_MAX - a) {\n return INT64_MAX;\n }\n } else if (b < INT64_MIN - a) {\n return INT64_MIN;\n }\n }\n return a + b;\n}\n\n// Safely multiplies two int64_t values and with clamping on overflow.\nint64_t int64_mul_clamp(int64_t a, int64_t b) {\n if (a || b) {\n if (a > 0) {\n if (b > 0 && a > INT64_MAX / b) {\n return INT64_MAX;\n } else if (b < 0 && b < INT64_MIN / a) {\n return INT64_MIN;\n }\n } else 
{\n if (b > 0 && a < INT64_MIN / b) {\n return INT64_MIN;\n } else if (b < 0 && a < INT64_MAX / b) {\n return INT64_MAX;\n }\n }\n }\n return a * b;\n}\n\n/// https://github.com/tidwall/varint.c\nint varint_write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n if (x < 128) {\n *bytes = x;\n return 1;\n }\n int n = 0;\n do {\n bytes[n++] = (uint8_t)x | 128;\n x >>= 7;\n } while (x >= 128);\n bytes[n++] = (uint8_t)x;\n return n;\n}\n\nint varint_read_u64(const void *data, size_t len, uint64_t *x) {\n const uint8_t *bytes = data;\n if (len > 0 && bytes[0] < 128) {\n *x = bytes[0];\n return 1;\n }\n uint64_t b;\n *x = 0;\n size_t i = 0;\n while (i < len && i < 10) {\n b = bytes[i]; \n *x |= (b & 127) << (7 * i); \n if (b < 128) {\n return i + 1;\n }\n i++;\n }\n return i == 10 ? -1 : 0;\n}\n\nint varint_write_i64(void *data, int64_t x) {\n uint64_t ux = (uint64_t)x << 1;\n ux = x < 0 ? ~ux : ux;\n return varint_write_u64(data, ux);\n}\n\nint varint_read_i64(const void *data, size_t len, int64_t *x) {\n uint64_t ux;\n int n = varint_read_u64(data, len, &ux);\n *x = (int64_t)(ux >> 1);\n *x = ux&1 ? 
~*x : *x;\n return n;\n}\n\n\nconst char *memstr(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0fB\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fK\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1fM\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1fG\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0G\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0M\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0K\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\nconst char *memstr_long(double size, char buf[64]) {\n if (size < 1024.0) {\n snprintf(buf, 64, \"%0.0f bytes\", size);\n } else if (size < 1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f KB\", size/1024.0);\n } else if (size < 1024.0*1024.0*1024.0) {\n snprintf(buf, 64, \"%0.1f MB\", size/1024.0/1024.0);\n } else {\n snprintf(buf, 64, \"%0.1f GB\", size/1024.0/1024.0/1024.0);\n }\n char *dot;\n if ((dot=strstr(buf, \".0 GB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 MB\"))) {\n memmove(dot, dot+2, 7);\n } else if ((dot=strstr(buf, \".0 KB\"))) {\n memmove(dot, dot+2, 7);\n }\n return buf;\n}\n\n// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html\nuint64_t mix13(uint64_t key) {\n key ^= (key >> 30);\n key *= UINT64_C(0xbf58476d1ce4e5b9);\n key ^= (key >> 27);\n key *= UINT64_C(0x94d049bb133111eb);\n key ^= (key >> 31);\n return key;\n}\n\nuint64_t rand_next(uint64_t *seed) {\n // pcg + mix13\n *seed = (*seed * UINT64_C(6364136223846793005)) + 1;\n return mix13(*seed);\n}\n\nvoid write_u64(void *data, uint64_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n bytes[4] = (x>>32)&0xFF;\n bytes[5] = (x>>40)&0xFF;\n bytes[6] = (x>>48)&0xFF;\n bytes[7] = (x>>56)&0xFF;\n}\n\nuint64_t read_u64(const void *data) {\n const 
uint8_t *bytes = data;\n uint64_t x = 0;\n x |= ((uint64_t)bytes[0])<<0;\n x |= ((uint64_t)bytes[1])<<8;\n x |= ((uint64_t)bytes[2])<<16;\n x |= ((uint64_t)bytes[3])<<24;\n x |= ((uint64_t)bytes[4])<<32;\n x |= ((uint64_t)bytes[5])<<40;\n x |= ((uint64_t)bytes[6])<<48;\n x |= ((uint64_t)bytes[7])<<56;\n return x;\n}\n\nvoid write_u32(void *data, uint32_t x) {\n uint8_t *bytes = data;\n bytes[0] = (x>>0)&0xFF;\n bytes[1] = (x>>8)&0xFF;\n bytes[2] = (x>>16)&0xFF;\n bytes[3] = (x>>24)&0xFF;\n}\n\nuint32_t read_u32(const void *data) {\n const uint8_t *bytes = data;\n uint32_t x = 0;\n x |= ((uint32_t)bytes[0])<<0;\n x |= ((uint32_t)bytes[1])<<8;\n x |= ((uint32_t)bytes[2])<<16;\n x |= ((uint32_t)bytes[3])<<24;\n return x;\n}\n\n// https://www.w3.org/TR/2003/REC-PNG-20031110/#D-CRCAppendix\nuint32_t crc32(const void *data, size_t len) {\n static __thread uint32_t table[256];\n static __thread bool computed = false;\n if (!computed) {\n for (uint32_t n = 0; n < 256; n++) {\n uint32_t c = n;\n for (int k = 0; k < 8; k++) {\n c = (c&1)?0xedb88320L^(c>>1):c>>1;\n }\n table[n] = c;\n }\n computed = true;\n }\n uint32_t crc = ~0;\n const uint8_t *buf = data;\n for (size_t n = 0; n < len; n++) {\n crc = table[(crc^buf[n])&0xff]^(crc>>8);\n }\n return ~crc;\n}\n\n// Attempts to read exactly len bytes from file stream\n// Returns the number of bytes read. 
Anything less than len means the stream\n// was closed or an error occured while reading.\n// Return -1 if no bytes were read and there was an error.\nssize_t read_full(int fd, void *data, size_t len) {\n uint8_t *bytes = data;\n size_t total = 0;\n while (len > 0) {\n ssize_t n = read(fd, bytes+total, len);\n if (n <= 0) {\n if (total > 0) {\n break;\n }\n return n;\n }\n len -= n;\n total += n;\n }\n return total;\n}\n\nsize_t u64toa(uint64_t x, uint8_t *data) {\n if (x < 10) {\n data[0] = '0'+x;\n return 1;\n }\n size_t i = 0;\n do {\n data[i++] = '0' + x % 10;\n } while ((x /= 10) > 0);\n // reverse the characters\n for (size_t j = 0, k = i-1; j < k; j++, k--) {\n uint8_t ch = data[j];\n data[j] = data[k];\n data[k] = ch;\n }\n return i;\n}\n\nsize_t i64toa(int64_t x, uint8_t *data) {\n if (x < 0) {\n data[0] = '-';\n data++;\n return u64toa(x * -1, data) + 1;\n }\n return u64toa(x, data);\n}\n\nuint32_t fnv1a_case(const char* buf, size_t len) {\n uint32_t hash = 0x811c9dc5;\n for (size_t i = 0; i < len; i++) {\n hash = (hash ^ tolower(buf[i])) * 0x01000193;\n }\n\treturn hash;\n}\n\nbool parse_i64(const char *data, size_t len, int64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n errno = 0;\n char *end;\n *x = strtoll(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool parse_u64(const char *data, size_t len, uint64_t *x) {\n char buf[24];\n if (len > 21) {\n return false;\n }\n memcpy(buf, data, len);\n buf[len] = '\\0';\n if (buf[0] == '-') {\n return false;\n }\n errno = 0;\n char *end;\n *x = strtoull(buf, &end, 10);\n return errno == 0 && end == buf+len;\n}\n\nbool argi64(struct args *args, int idx, int64_t *x) {\n return parse_i64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nbool argu64(struct args *args, int idx, uint64_t *x) {\n return parse_u64(args->bufs[idx].data, args->bufs[idx].len, x);\n}\n\nvoid *load_ptr(const uint8_t data[PTRSIZE]) {\n#if PTRSIZE == 4\n uint32_t 
uptr;\n memcpy(&uptr, data, 4);\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 6\n uint64_t uptr = 0;\n uptr |= ((uint64_t)data[0])<<0;\n uptr |= ((uint64_t)data[1])<<8;\n uptr |= ((uint64_t)data[2])<<16;\n uptr |= ((uint64_t)data[3])<<24;\n uptr |= ((uint64_t)data[4])<<32;\n uptr |= ((uint64_t)data[5])<<40;\n return (void*)(uintptr_t)uptr;\n#elif PTRSIZE == 8\n uint64_t uptr;\n memcpy(&uptr, data, 8);\n return (void*)(uintptr_t)uptr;\n#endif\n}\n\nvoid store_ptr(uint8_t data[PTRSIZE], void *ptr) {\n#if PTRSIZE == 4\n uint32_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 4);\n#elif PTRSIZE == 6\n uint64_t uptr = (uintptr_t)(void*)ptr;\n data[0] = (uptr>>0)&0xFF;\n data[1] = (uptr>>8)&0xFF;\n data[2] = (uptr>>16)&0xFF;\n data[3] = (uptr>>24)&0xFF;\n data[4] = (uptr>>32)&0xFF;\n data[5] = (uptr>>40)&0xFF;\n#elif PTRSIZE == 8\n uint64_t uptr = (uintptr_t)(void*)ptr;\n memcpy(data, &uptr, 8);\n#endif\n}\n\n// Increment a morris counter. The counter is clipped to 31 bits\nuint8_t morris_incr(uint8_t morris, uint64_t rand) {\n return morris>=31?31:morris+!(rand&((UINT64_C(1)< '~') {\n printf(\"\\\\x%02x\", c);\n } else {\n printf(\"%c\", c);\n }\n }\n}\n"], ["/pogocache/src/resp.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
// https://github.com/tidwall/pogocache
//
// Copyright 2025 Polypoint Labs, LLC. All rights reserved.
// This file is part of the Pogocache project.
// Use of this source code is governed by the AGPL that can be found in
// the LICENSE file.
//
// For alternative licensing options or general questions, please contact
// us at licensing@polypointlabs.com.
//
// Unit resp.c provides the parser for the RESP wire protocol.
// NOTE(review): the system header names below were lost in extraction
// (the '<...>' part was stripped); confirm against the upstream source.
#include
#include
#include
#include
#include
#include
#include
#include
#include "util.h"
#include "stats.h"
#include "parse.h"

// parse_resp_telnet parses one line of the inline (telnet-style) command
// syntax: whitespace-separated arguments with optional single/double quoting
// and \n, \r, \t escapes inside quotes.
// returns the number of bytes read from data.
// returns -1 on error
// returns 0 when there isn't enough data to complete a command.
ssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args) {
    char *err = NULL;
    struct buf arg = { 0 };       // accumulates the current argument bytes
    bool inarg = false;           // currently inside an argument
    char quote = '\0';            // active quote char, or 0 when unquoted
    for (size_t i = 0; i < len; i++) {
        char ch = bytes[i];
        if (inarg) {
            if (quote) {
                if (ch == '\n') {
                    // newline inside a quoted argument is a protocol error
                    goto fail_quotes;
                }
                if (ch == quote) {
                    // closing quote: emit the argument
                    args_append(args, arg.data, arg.len, false);
                    if (args->len > MAXARGS) {
                        goto fail_nargs;
                    }
                    arg.len = 0;
                    i++;
                    if (i == len) {
                        break;
                    }
                    ch = bytes[i];
                    inarg = false;
                    if (ch == '\n') {
                        // re-process the newline on the next iteration so
                        // the command terminates through the normal path
                        i--;
                        continue;
                    }
                    if (!isspace(ch)) {
                        // a closing quote must be followed by whitespace
                        goto fail_quotes;
                    }
                    continue;
                } else if (ch == '\\') {
                    // escape sequence inside quotes
                    i++;
                    if (i == len) {
                        break;
                    }
                    ch = bytes[i];
                    switch (ch) {
                    case 'n': ch = '\n'; break;
                    case 'r': ch = '\r'; break;
                    case 't': ch = '\t'; break;
                    }
                }
                buf_append_byte(&arg, ch);
                if (arg.len > MAXARGSZ) {
                    stat_store_too_large_incr(0);
                    goto fail_argsz;
                }
            } else {
                if (ch == '"' || ch == '\'') {
                    // NOTE(review): a quote in the middle of an unquoted
                    // argument switches into quoted mode without emitting
                    // the pending bytes — confirm this matches the intended
                    // inline-command semantics.
                    quote = ch;
                } else if (isspace(ch)) {
                    // whitespace terminates the unquoted argument
                    args_append(args, arg.data, arg.len, false);
                    if (args->len > MAXARGS) {
                        goto fail_nargs;
                    }
                    arg.len = 0;
                    if (ch == '\n') {
                        break;
                    }
                    inarg = false;
                } else {
                    buf_append_byte(&arg, ch);
                    if (arg.len > MAXARGSZ) {
                        stat_store_too_large_incr(0);
                        goto fail_argsz;
                    }
                }
            }
        } else {
            if (ch == '\n') {
                // end of line: the command (possibly empty) is complete
                buf_clear(&arg);
                return i+1;
            }
            if (isspace(ch)) {
                continue;
            }
            inarg = true;
            if (ch == '"' || ch == '\'') {
                quote = ch;
            } else {
                quote = 0;
                buf_append_byte(&arg, ch);
                if (arg.len > MAXARGSZ) {
                    stat_store_too_large_incr(0);
                    goto fail_argsz;
                }
            }
        }
    }
    // ran out of input mid-command: wait for more data
    buf_clear(&arg);
    return 0;
// the fail labels deliberately fall through; 'err' is latched by the
// first label reached.
fail_quotes:
    if (!err) err = "ERR Protocol error: unbalanced quotes in request";
fail_nargs:
    if (!err) err = "ERR Protocol error: invalid multibulk length";
fail_argsz:
    if (!err) err = "ERR Protocol error: invalid bulk length";
/* fail: */
    if (err) {
        snprintf(parse_lasterr, sizeof(parse_lasterr), "%s", err);
    }
    buf_clear(&arg);
    return -1;
}

// read_num parses a base-10 signed integer from exactly len bytes at data,
// setting *ok when the whole span was consumed and the value lies within
// [min, max]. Relies on the caller guaranteeing a non-digit byte ('\r')
// at data+len so strtoll stops there.
static int64_t read_num(const char *data, size_t len, int64_t min, int64_t max,
    bool *ok)
{
    errno = 0;
    char *end;
    int64_t x = strtoll(data, &end, 10);
    *ok = errno == 0 && (size_t)(end-data) == len && x >= min && x <= max;
    return x;
}

// read_resp_num reads a CRLF-terminated integer at 'bytes' into 'var' and
// advances 'bytes' past the CRLF. Expands inside parse_resp only: it uses
// the local 'bytes'/'end' and returns 0 (need more data) or -1 (protocol
// error) from the enclosing function.
#define read_resp_num(var, min, max, errmsg) { \
    char *p = memchr(bytes, '\r', end-bytes); \
    if (!p) { \
        if (end-bytes > 32) { \
            parse_seterror("ERR Protocol error: " errmsg); \
            return -1; \
        } \
        return 0; \
    } \
    if (p+1 == end) { \
        return 0; \
    } \
    if (*(p+1) != '\n') { \
        return -1; \
    } \
    bool ok; \
    var = read_num(bytes, p-bytes, min, max, &ok); \
    if (!ok) { \
        parse_seterror("ERR Protocol error: " errmsg); \
        return -1; \
    } \
    bytes = p+2; \
}

// parse_resp parses a RESP multibulk command: "*<n>\r\n" followed by n
// bulk strings "$<len>\r\n<data>\r\n".
// returns the number of bytes read from data.
// returns -1 on error
// returns 0 when there isn't enough data to complete a command.
ssize_t parse_resp(const char *bytes, size_t len, struct args *args) {
    const char *start = bytes;
    const char *end = bytes+len;
    if (bytes == end) {
        return 0;
    }
    if (*(bytes++) != '*') {
        return -1;
    }
    if (bytes == end) {
        return 0;
    }
    int64_t nargs;
    // negative counts are tolerated here; the loop below simply runs zero
    // times and the bytes are consumed.
    read_resp_num(nargs, LONG_MIN, MAXARGS, "invalid multibulk length");
    for (int j = 0; j < nargs; j++) {
        if (bytes == end) {
            return 0;
        }
        if (*(bytes++) != '$') {
            snprintf(parse_lasterr, sizeof(parse_lasterr),
                "ERR Protocol error: expected '$', got '%c'", *(bytes-1));
            return -1;
        }
        if (bytes == end) {
            return 0;
        }
        int64_t nbytes;
        read_resp_num(nbytes, 0, MAXARGSZ, "invalid bulk length");
        if (nbytes+2 > end-bytes) {
            // payload plus trailing CRLF not fully buffered yet
            return 0;
        }
        args_append(args, bytes, nbytes, true);
        bytes += nbytes+2;
    }
    return bytes-start;
}

// https://github.com/tidwall/pogocache
//
// Copyright 2025 Polypoint Labs, LLC. All rights reserved.
// This file is part of the Pogocache project.
// Use of this source code is governed by the AGPL that can be found in
// the LICENSE file.
//
// For alternative licensing options or general questions, please contact
// us at licensing@polypointlabs.com.
//
// Unit conn.c are interface functions for a network connection.
// NOTE(review): system header names lost in extraction; confirm upstream.
#include
#include
#include
#include
#include "net.h"
#include "args.h"
#include "cmds.h"
#include "xmalloc.h"
#include "parse.h"
#include "util.h"
#include "helppage.h"

#define MAXPACKETSZ 1048576 // Maximum read packet size

struct conn {
    struct net_conn *conn5;  // originating connection
    struct buf packet;       // current incoming packet
    int proto;               // connection protocol (memcache, http, etc)
    bool auth;               // user is authorized
    bool noreply;            // only for memcache
    bool keepalive;          // only for http
    int httpvers;            // only for http
    struct args args;        // command args, if any
    struct pg *pg;           // postgres context, only if proto is postgres
};

// conn_istls reports whether the underlying connection is TLS.
bool conn_istls(struct conn *conn) {
    return net_conn_istls(conn->conn5);
}

// conn_proto returns the detected wire protocol of the connection.
int conn_proto(struct conn *conn) {
    return conn->proto;
}

// conn_auth reports whether the client has authenticated.
bool conn_auth(struct conn *conn) {
    return conn->auth;
}

// conn_setauth sets the connection's authenticated flag.
void conn_setauth(struct conn *conn, bool ok) {
    conn->auth = ok;
}

// conn_isclosed reports whether the underlying connection is closed.
bool conn_isclosed(struct conn *conn) {
    return net_conn_isclosed(conn->conn5);
}

// conn_close closes the underlying connection.
void conn_close(struct conn *conn) {
    net_conn_close(conn->conn5);
}
// evopened is the net-layer open callback: allocates a zeroed conn wrapper
// and attaches it to the net_conn as udata.
void evopened(struct net_conn *conn5, void *udata) {
    (void)udata;
    struct conn *conn = xmalloc(sizeof(struct conn));
    memset(conn, 0, sizeof(struct conn));
    conn->conn5 = conn5;
    net_conn_setudata(conn5, conn);
}

// evclosed is the net-layer close callback: releases everything owned by
// the conn wrapper (packet buffer, args, postgres context) and the wrapper
// itself.
void evclosed(struct net_conn *conn5, void *udata) {
    (void)udata;
    struct conn *conn = net_conn_udata(conn5);
    buf_clear(&conn->packet);
    args_free(&conn->args);
    pg_free(conn->pg);
    xfree(conn);
}

// network data handler
// The evlen may be zero when returning from a bgwork routine, while having
// existing data in the connection packet.
void evdata(struct net_conn *conn5, const void *evdata, size_t evlen,
    void *udata)
{
    (void)udata;
    struct conn *conn = net_conn_udata(conn5);
    if (conn_isclosed(conn)) {
        goto close;
    }
#ifdef DATASETOK
    // benchmark-only fast path: canned replies, no parsing
    if (evlen == 14 && memcmp(evdata, "*1\r\n$4\r\nPING\r\n", 14) == 0) {
        conn_write_raw(conn, "+PONG\r\n", 7);
    } else if (evlen == 13 && memcmp(evdata, "*2\r\n$3\r\nGET\r\n", 13) == 0) {
        conn_write_raw(conn, "$1\r\nx\r\n", 7);
    } else {
        conn_write_raw(conn, "+OK\r\n", 5);
    }
    return;
#endif
    // Choose the working view: parse directly from the event buffer when
    // no partial command is pending, otherwise append to the carry-over
    // packet buffer and parse from there.
    char *data;
    size_t len;
    bool copied;
    if (conn->packet.len == 0) {
        data = (char*)evdata;
        len = evlen;
        copied = false;
    } else {
        buf_append(&conn->packet, evdata, evlen);
        len = conn->packet.len;
        data = conn->packet.data;
        copied = true;
    }
    while (len > 0 && !conn_isclosed(conn)) {
        // Parse the command
        ssize_t n = parse_command(data, len, &conn->args, &conn->proto,
            &conn->noreply, &conn->httpvers, &conn->keepalive, &conn->pg);
        if (n == 0) {
            // Not enough data provided yet.
            break;
        } else if (n == -1) {
            // Protocol error occurred.
            conn_write_error(conn, parse_lasterror());
            if (conn->proto == PROTO_MEMCACHE) {
                // Memcache doesn't close, but we'll need to know the last
                // character position to continue and revert back to it so
                // we can attempt to continue to the next command.
                n = parse_lastmc_n();
            } else {
                // Close on protocol error
                conn_close(conn);
                break;
            }
        } else if (conn->args.len == 0) {
            // There were no command arguments provided.
            if (conn->proto == PROTO_POSTGRES) {
                if (!pg_respond(conn, conn->pg)) {
                    // close connection
                    conn_close(conn);
                    break;
                }
            } else if (conn->proto == PROTO_MEMCACHE) {
                // Memcache simply returns a nondescript error.
                conn_write_error(conn, "ERROR");
            } else if (conn->proto == PROTO_HTTP) {
                // HTTP must always return arguments.
                assert(!"PROTO_HTTP");
            } else if (conn->proto == PROTO_RESP) {
                // RESP just continues until it gets args.
            }
        } else if (conn->proto == PROTO_POSTGRES && !conn->pg->ready) {
            // This should not have been reached. The client did not
            // send a startup message
            conn_close(conn);
            break;
        } else if (conn->proto != PROTO_POSTGRES ||
            pg_precommand(conn, &conn->args, conn->pg))
        {
            evcommand(conn, &conn->args);
        }
        // advance past the consumed command
        len -= n;
        data += n;
        if (net_conn_bgworking(conn->conn5)) {
            // BGWORK(0)
            break;
        }
        if (conn->proto == PROTO_HTTP) {
            conn_close(conn);
        }
    }
    if (conn_isclosed(conn)) {
        goto close;
    }
    if (len == 0) {
        // everything consumed; reset (and shrink an oversized) carry buffer
        if (copied) {
            if (conn->packet.cap > MAXPACKETSZ) {
                buf_clear(&conn->packet);
            }
            conn->packet.len = 0;
        }
    } else {
        // retain the unconsumed tail for the next event
        if (copied) {
            memmove(conn->packet.data, data, len);
            conn->packet.len = len;
        } else {
            buf_append(&conn->packet, data, len);
        }
    }
    return;
close:
    conn_close(conn);
}

// bgworkctx carries the user's work/done callbacks through the net layer's
// background-work machinery.
struct bgworkctx {
    struct conn *conn;
    void *udata;
    void(*work)(void *udata);
    void(*done)(struct conn *conn, void *udata);
};

// work5 adapts the user's work callback to the net-layer signature.
// Runs on a background thread.
static void work5(void *udata) {
    struct bgworkctx *ctx = udata;
    ctx->work(ctx->udata);
}

// done5 adapts the user's done callback to the net-layer signature and
// frees the context allocated by conn_bgwork.
static void done5(struct net_conn *conn, void *udata) {
    (void)conn;
    struct bgworkctx *ctx = udata;
    ctx->done(ctx->conn, ctx->udata);
    xfree(ctx);
}
It's not safe to use the conn type in the work function.\nbool conn_bgwork(struct conn *conn, void(*work)(void *udata), \n void(*done)(struct conn *conn, void *udata), void *udata)\n{\n struct bgworkctx *ctx = xmalloc(sizeof(struct bgworkctx));\n ctx->conn = conn;\n ctx->udata = udata;\n ctx->work = work;\n ctx->done = done;\n if (!net_conn_bgwork(conn->conn5, work5, done5, ctx)) {\n xfree(ctx);\n return false;\n }\n return true;\n}\n\nstatic void writeln(struct conn *conn, char ch, const void *data, ssize_t len) {\n if (len < 0) {\n len = strlen(data);\n }\n net_conn_out_ensure(conn->conn5, 3+len);\n net_conn_out_write_byte_nocheck(conn->conn5, ch);\n size_t mark = net_conn_out_len(conn->conn5);\n net_conn_out_write_nocheck(conn->conn5, data, len);\n net_conn_out_write_byte_nocheck(conn->conn5, '\\r');\n net_conn_out_write_byte_nocheck(conn->conn5, '\\n');\n uint8_t *out = (uint8_t*)net_conn_out(conn->conn5);\n for (ssize_t i = mark; i < len; i++) {\n if (out[i] < ' ') {\n out[i] = ' ';\n }\n }\n}\n\nstatic void write_error(struct conn *conn, const char *err, bool server) {\n if (conn->proto == PROTO_MEMCACHE) {\n if (strstr(err, \"ERR \") == err) {\n // convert to client or server error\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n if (server) {\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err+4); \n } else {\n snprintf(err2, err2sz, \"CLIENT_ERROR %s\\r\\n\", err+4); \n }\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n if (server) {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"SERVER_ERROR %s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else if (strstr(err, \"CLIENT_ERROR \") == err || \n strstr(err, \"CLIENT_ERROR \") == err)\n {\n size_t err2sz = strlen(err)+32;\n char *err2 = xmalloc(err2sz);\n snprintf(err2, err2sz, \"%s\\r\\n\", err);\n conn_write_raw(conn, err2, strlen(err2));\n xfree(err2);\n } else {\n conn_write_raw(conn, 
\"ERROR\\r\\n\", 7);\n }\n }\n } else if (conn->proto == PROTO_POSTGRES) {\n if (strstr(err, \"ERR \") == err) {\n err = err+4;\n }\n pg_write_error(conn, err);\n pg_write_ready(conn, 'I');\n } else if (conn->proto == PROTO_HTTP) {\n if (strstr(err, \"ERR \") == err) {\n err += 4;\n }\n if (strcmp(err, \"Show Help HTML\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_HTML, -1);\n } else if (strcmp(err, \"Show Help TEXT\") == 0) {\n conn_write_http(conn, 200, \"OK\", HELPPAGE_TEXT, -1);\n } else if (strcmp(err, \"Method Not Allowed\") == 0) {\n conn_write_http(conn, 405, \"Method Not Allowed\", \n \"Method Not Allowed\\r\\n\", -1);\n } else if (strcmp(err, \"Unauthorized\") == 0) {\n conn_write_http(conn, 401, \"Unauthorized\", \n \"Unauthorized\\r\\n\", -1);\n } else if (strcmp(err, \"Bad Request\") == 0) {\n conn_write_http(conn, 400, \"Bad Request\", \n \"Bad Request\\r\\n\", -1);\n } else {\n size_t sz = strlen(err)+32;\n char *err2 = xmalloc(sz);\n snprintf(err2, sz, \"ERR %s\\r\\n\", err);\n conn_write_http(conn, 500, \"Internal Server Error\", \n err2, -1);\n xfree(err2);\n }\n } else {\n writeln(conn, '-', err, -1);\n }\n}\n\nvoid conn_write_error(struct conn *conn, const char *err) {\n bool server = false;\n if (strcmp(err, ERR_OUT_OF_MEMORY) == 0) {\n server = true;\n }\n write_error(conn, err, server);\n}\n\nvoid conn_write_string(struct conn *conn, const char *cstr) {\n writeln(conn, '+', cstr, -1);\n}\n\nvoid conn_write_null(struct conn *conn) {\n net_conn_out_write(conn->conn5, \"$-1\\r\\n\", 5);\n}\n\nvoid resp_write_bulk(struct buf *buf, const void *data, size_t len) {\n uint8_t str[32];\n size_t n = u64toa(len, str);\n buf_append_byte(buf, '$');\n buf_append(buf, str, n);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n buf_append(buf, data, len);\n buf_append_byte(buf, '\\r');\n buf_append_byte(buf, '\\n');\n}\n\nvoid conn_write_bulk(struct conn *conn, const void *data, size_t len) {\n net_conn_out_ensure(conn->conn5, 
32+len);\n size_t olen = net_conn_out_len(conn->conn5);\n uint8_t *base = (uint8_t*)net_conn_out(conn->conn5)+olen;\n uint8_t *p = base;\n *(p++) = '$';\n p += u64toa(len, p);\n *(p++) = '\\r';\n *(p++) = '\\n';\n memcpy(p, data, len);\n p += len;\n *(p++) = '\\r';\n *(p++) = '\\n';\n net_conn_out_setlen(conn->conn5, olen + (p-base));\n}\n\nvoid conn_write_raw(struct conn *conn, const void *data, size_t len) {\n net_conn_out_write(conn->conn5, data, len);\n}\n\nvoid conn_write_http(struct conn *conn, int code, const char *status,\n const void *body, ssize_t bodylen)\n{\n if (bodylen == -1) {\n if (!body) {\n body = status;\n }\n bodylen = strlen(body);\n }\n char resp[512];\n size_t n = snprintf(resp, sizeof(resp), \n \"HTTP/1.1 %d %s\\r\\n\"\n \"Content-Length: %zu\\r\\n\"\n \"Connection: Close\\r\\n\"\n \"\\r\\n\",\n code, status, bodylen);\n conn_write_raw(conn, resp, n);\n if (bodylen > 0) {\n conn_write_raw(conn, body, bodylen);\n }\n}\n\nvoid conn_write_array(struct conn *conn, size_t count) {\n uint8_t str[24];\n size_t n = u64toa(count, str);\n writeln(conn, '*', str, n);\n}\n\nvoid conn_write_uint(struct conn *conn, uint64_t value) {\n uint8_t buf[24];\n size_t n = u64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, '+', buf, n); // the '+' is needed for unsigned int\n }\n}\n\nvoid conn_write_int(struct conn *conn, int64_t value) {\n uint8_t buf[24];\n size_t n = i64toa(value, buf);\n if (conn->proto == PROTO_MEMCACHE) {\n conn_write_raw(conn, buf, n);\n } else {\n writeln(conn, ':', buf, n);\n }\n}\n\nvoid conn_write_raw_cstr(struct conn *conn, const char *cstr) {\n conn_write_raw(conn, cstr, strlen(cstr));\n}\n\nvoid conn_write_bulk_cstr(struct conn *conn, const char *cstr) {\n conn_write_bulk(conn, cstr, strlen(cstr));\n}\n\nvoid stat_cmd_get_incr(struct conn *conn) {\n net_stat_cmd_get_incr(conn->conn5);\n}\n\nvoid stat_cmd_set_incr(struct conn *conn) {\n 
net_stat_cmd_set_incr(conn->conn5);\n}\n\nvoid stat_get_hits_incr(struct conn *conn) {\n net_stat_get_hits_incr(conn->conn5);\n}\n\nvoid stat_get_misses_incr(struct conn *conn) {\n net_stat_get_misses_incr(conn->conn5);\n}\n\nbool pg_execute(struct conn *conn) {\n return conn->pg->execute;\n}\n\nstruct pg *conn_pg(struct conn *conn) {\n return conn->pg;\n}\n"], ["/pogocache/src/memcache.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit memcache.c provides the parser for the Memcache wire protocol.\n#include \n#include \n#include \n#include \n#include \n#include \"util.h\"\n#include \"stats.h\"\n#include \"parse.h\"\n\nstatic __thread size_t mc_n = 0;\n\nsize_t parse_lastmc_n(void) {\n return mc_n;\n}\n\nbool mc_valid_key(struct args *args, int i) {\n const uint8_t *key = (uint8_t*)args->bufs[i].data;\n size_t len = args->bufs[i].len;\n if (len == 0 || len > 250) {\n return false;\n }\n for (size_t i = 0; i < len; i++) {\n if (key[i] <= ' ' || key[i] == 0x7F) {\n return false;\n }\n }\n return true;\n}\n\nenum mc_cmd { MC_UNKNOWN, \n // writers (optional reply)\n MC_SET, MC_ADD, MC_REPLACE, MC_APPEND, MC_PREPEND, MC_CAS, // storage\n MC_INCR, MC_DECR, // increment/decrement\n MC_FLUSH_ALL, MC_DELETE, // deletion\n MC_TOUCH, // touch\n MC_VERBOSITY, // logging\n // readers (always replys)\n MC_GET, MC_GETS, // retreival\n MC_GAT, MC_GATS, // get and touch\n MC_VERSION, MC_STATS, // information\n MC_QUIT, // client\n};\n\nstatic bool is_mc_store_cmd(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_CAS;\n}\n\nstatic bool is_mc_noreplyable(enum mc_cmd cmd) {\n return cmd >= MC_SET && cmd <= MC_VERBOSITY;\n}\n\nstatic ssize_t 
parse_memcache_telnet(const char *data, size_t len, \n struct args *args)\n{\n const char *p = data;\n const char *end = data+len;\n const char *s = p;\n char last = 0;\n while (p < end) {\n char ch = *(p++);\n if (ch == ' ') {\n size_t wn = p-s-1;\n // if (wn > 0) {\n args_append(args, s, wn, true);\n s = p;\n continue;\n }\n if (ch == '\\n') {\n size_t wn = p-s-1;\n if (last == '\\r') {\n wn--;\n }\n if (wn > 0) {\n args_append(args, s, wn, true);\n }\n return p-data;\n }\n last = ch;\n }\n return 0;\n}\n\nssize_t parse_memcache(const char *data, size_t len, struct args *args, \n bool *noreply)\n{\n ssize_t n = parse_memcache_telnet(data, len, args);\n if (n <= 0 || args->len == 0) {\n return n;\n }\n // args_print(args);\n mc_n = n;\n enum mc_cmd cmd;\n struct args args2 = { 0 };\n *noreply = false;\n // check for common get-2\n if (args->len == 2 && arg_const_eq(args, 0, \"get\")) {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args->bufs[0].data = \"mget\";\n args->bufs[0].len = 4;\n return n;\n }\n // Check for common set-5 (allows for expiry)\n if (args->len == 5 && arg_const_eq(args, 0, \"set\")) {\n if (args->bufs[2].len == 1 && args->bufs[2].data[0] == '0') {\n if (!mc_valid_key(args, 1)) {\n if (args->bufs[1].len == 0) {\n return -1;\n }\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool expset = false;\n int64_t x;\n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n expset = true;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n 
parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n // replace the \"flags\" with a value\n args->bufs[2].len = value_len;\n args->bufs[2].data = (void*)value;\n args->len = 3;\n if (expset) {\n // add the \"ex \" to last two arguments\n args->bufs[4] = args->bufs[3];\n args->bufs[3].data = \"ex\";\n args->bufs[3].len = 2;\n args->len = 5;\n }\n return n;\n } else {\n // flags was set, use plus branch\n cmd = MC_SET;\n goto set_plus;\n }\n }\n // Otherwise use lookup command table. This could be optimized into a\n // switch table or hash table. See cmds.c for hash table example.\n cmd =\n arg_const_eq(args, 0, \"set\") ? MC_SET : // XY\n arg_const_eq(args, 0, \"add\") ? MC_ADD : // XY\n arg_const_eq(args, 0, \"cas\") ? MC_CAS : // XY\n arg_const_eq(args, 0, \"replace\") ? MC_REPLACE : // XY\n arg_const_eq(args, 0, \"get\") ? MC_GET : // XY\n arg_const_eq(args, 0, \"delete\") ? MC_DELETE : // XY\n arg_const_eq(args, 0, \"append\") ? MC_APPEND : // XY\n arg_const_eq(args, 0, \"prepend\") ? MC_PREPEND : // XY\n arg_const_eq(args, 0, \"gets\") ? MC_GETS : // XY\n arg_const_eq(args, 0, \"incr\") ? MC_INCR : // XY\n arg_const_eq(args, 0, \"decr\") ? MC_DECR: // XY\n arg_const_eq(args, 0, \"touch\") ? MC_TOUCH : // X\n arg_const_eq(args, 0, \"gat\") ? MC_GAT : // X\n arg_const_eq(args, 0, \"gats\") ? MC_GATS : // X\n arg_const_eq(args, 0, \"flush_all\") ? MC_FLUSH_ALL : // X\n arg_const_eq(args, 0, \"stats\") ? MC_STATS : // X\n arg_const_eq(args, 0, \"version\") ? MC_VERSION : // X\n arg_const_eq(args, 0, \"quit\") ? MC_QUIT : // XY\n arg_const_eq(args, 0, \"verbosity\") ? 
MC_VERBOSITY : // X\n MC_UNKNOWN;\n if (cmd == MC_UNKNOWN) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (is_mc_noreplyable(cmd)) {\n if (arg_const_eq(args, args->len-1, \"noreply\")) {\n *noreply = true;\n buf_clear(&args->bufs[args->len-1]);\n args->len--;\n }\n }\n if (is_mc_store_cmd(cmd)) {\n // Store commands include 'set', 'add', 'replace', 'append', 'prepend',\n // and 'cas'.\n if ((cmd == MC_CAS && args->len != 6) && \n (cmd != MC_CAS && args->len != 5))\n {\n parse_seterror(\"ERROR\");\n return -1;\n }\n set_plus:\n // check all values before continuing\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n int64_t x;\n if (!argi64(args, 2, &x) || x < 0) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 3, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n if (!argi64(args, 4, &x) || x < 0 || x > MAXARGSZ) {\n stat_store_too_large_incr(0);\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n bool hascas = false;\n char cas[24] = \"0\";\n if (cmd == MC_CAS) {\n hascas = true;\n uint64_t y;\n if (!argu64(args, 5, &y)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n assert(args->bufs[5].len < sizeof(cas));\n memcpy(cas, args->bufs[5].data, args->bufs[5].len);\n cas[args->bufs[5].len] = '\\0';\n buf_clear(&args->bufs[5]);\n args->len--;\n }\n\n // Storage commands must read a value that follows the first line.\n if (len-n < (size_t)x+2) {\n return 0;\n }\n const char *value = data+n;\n size_t value_len = x;\n n += x+2;\n mc_n = n;\n if (data[n-2] != '\\r' || data[n-1] != '\\n') {\n parse_seterror(CLIENT_ERROR_BAD_CHUNK);\n return -1;\n }\n\n // Reconstruct the command into a RESP format. 
\n bool is_append_prepend = false;\n switch (cmd) {\n case MC_APPEND:\n args_append(&args2, \"append\", 6, true);\n is_append_prepend = true;\n break;\n case MC_PREPEND:\n args_append(&args2, \"prepend\", 7, true);\n is_append_prepend = true;\n break;\n default:\n args_append(&args2, \"set\", 3, true);\n break;\n }\n // Move key arg to new args\n take_and_append_arg(1);\n // Add value arg\n args_append(&args2, value, value_len, true);\n if (!is_append_prepend) {\n if (!(args->bufs[2].len == 1 && args->bufs[2].data[0] == '0')) {\n args_append(&args2, \"flags\", 5, true);\n take_and_append_arg(2);\n }\n \n if (!(args->bufs[3].len == 1 && args->bufs[3].data[0] == '0')) {\n args_append(&args2, \"ex\", 2, true);\n take_and_append_arg(3);\n }\n if (cmd == MC_ADD) {\n args_append(&args2, \"nx\", 2, true);\n } else if (cmd == MC_REPLACE) {\n args_append(&args2, \"xx\", 2, true);\n }\n if (hascas) {\n args_append(&args2, \"cas\", 3, true);\n args_append(&args2, cas, strlen(cas), false);\n }\n }\n } else if (cmd == MC_GET) {\n // Convert 'get * into 'MGET *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mget\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_DELETE) {\n // Convert 'delete ' into 'DEL '\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (args->len > 2) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"del\", 3, true);\n take_and_append_arg(1);\n } else if (cmd == MC_GETS) {\n // Convert 'gets * into 'MGETS *'\n if (args->len == 1) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check all keys\n for (size_t i = 1; i < args->len; 
i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"mgets\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GAT) {\n // Convert 'gat * into 'gat *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gat\", 3, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_GATS) {\n // Convert 'gats * into 'gats *'\n if (args->len <= 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check exptime\n int64_t x;\n if (!argi64(args, 2, &x)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n // check all keys\n for (size_t i = 2; i < args->len; i++) {\n if (!mc_valid_key(args, i)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n }\n args_append(&args2, \"gats\", 4, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_STATS) {\n args_append(&args2, \"stats\", 5, true);\n for (size_t i = 1; i < args->len; i++) {\n take_and_append_arg(i);\n }\n } else if (cmd == MC_INCR) {\n // Convert 'incr into 'uincrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"uincrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_DECR) {\n // Convert 'decr into 'udecrby '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n // check key\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n 
return -1;\n }\n args_append(&args2, \"udecrby\", 7, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_TOUCH) {\n // Convert 'touch ' into 'expire '\n if (args->len != 3) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n if (!mc_valid_key(args, 1)) {\n parse_seterror(CLIENT_ERROR_BAD_FORMAT);\n return -1;\n }\n args_append(&args2, \"expire\", 6, true);\n take_and_append_arg(1);\n take_and_append_arg(2);\n } else if (cmd == MC_FLUSH_ALL) {\n // Convert 'flush_all [delay]' into 'FLUSHALL [DELAY seconds]'\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"flushall\", 8, true);\n if (args->len == 2) {\n args_append(&args2, \"delay\", 5, true);\n take_and_append_arg(1);\n }\n } else if (cmd == MC_QUIT) {\n args_append(&args2, \"quit\", 4, true);\n *noreply = true;\n } else if (cmd == MC_VERSION) {\n args_append(&args2, \"version\", 7, true);\n *noreply = false;\n } else if (cmd == MC_VERBOSITY) {\n if (args->len > 2) {\n parse_seterror(\"ERROR\");\n return -1;\n }\n args_append(&args2, \"verbosity\", 7, true);\n take_and_append_arg(1);\n } else {\n return -1;\n }\n args_free(args);\n *args = args2;\n return n;\n}\n"], ["/pogocache/src/http.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
// https://github.com/tidwall/pogocache
//
// Copyright 2025 Polypoint Labs, LLC. All rights reserved.
// This file is part of the Pogocache project.
// Use of this source code is governed by the AGPL that can be found in
// the LICENSE file.
//
// For alternative licensing options or general questions, please contact
// us at licensing@polypointlabs.com.
//
// Unit http.c provides the parser for the HTTP wire protocol.
// NOTE(review): system header names lost in extraction; confirm upstream.
#define _GNU_SOURCE
#include
#include
#include
#include
#include "stats.h"
#include "util.h"
#include "parse.h"

// global auth configuration, defined elsewhere in the project
extern const bool useauth;
extern const char *auth;

// http_valid_key reports whether a URI path component is a usable cache
// key: 1..250 printable ASCII bytes, excluding the URL-significant
// characters % + @ $ ? =.
bool http_valid_key(const char *key, size_t len) {
    if (len == 0 || len > 250) {
        return false;
    }
    for (size_t i = 0; i < len; i++) {
        if (key[i] <= ' ' || key[i] >= 0x7F || key[i] == '%' || key[i] == '+' ||
            key[i] == '@' || key[i] == '$' || key[i] == '?' || key[i] == '=')
        {
            return false;
        }
    }
    return true;
}

// parse_http parses one HTTP request and converts it into command args:
// GET /<key> -> get, PUT /<key> -> set (with ex/flags/cas/nx/xx taken from
// the query string), DELETE /<key> -> del. Errors are reported through
// parse_seterror using sentinel strings that write_error maps to HTTP
// responses. Returns bytes consumed, -1 on error, 0 when incomplete.
ssize_t parse_http(const char *data, size_t len, struct args *args,
    int *httpvers, bool *keepalive)
{
    *keepalive = false;
    *httpvers = 0;
    const char *method = 0;
    size_t methodlen = 0;
    const char *uri = 0;
    size_t urilen = 0;
    int proto = 0;
    const char *hdrname = 0;
    size_t hdrnamelen = 0;
    const char *hdrval = 0;
    size_t hdrvallen = 0;
    size_t bodylen = 0;
    bool nocontentlength = true;
    bool html = false;            // client accepts text/html
    const char *authhdr = 0;      // Authorization header value, if any
    size_t authhdrlen = 0;
    const char *p = data;
    const char *e = p+len;
    const char *s = p;
    // request line: method
    while (p < e) {
        if (*p == ' ') {
            method = s;
            methodlen = p-s;
            p++;
            break;
        }
        if (*p == '\n') {
            goto badreq;
        }
        p++;
    }
    // request line: URI
    s = p;
    while (p < e) {
        if (*p == ' ') {
            uri = s;
            urilen = p-s;
            p++;
            break;
        }
        if (*p == '\n') {
            goto badreq;
        }
        p++;
    }
    // request line: "HTTP/<d>.<d>" version token, CRLF-terminated
    s = p;
    while (p < e) {
        if (*p == '\n') {
            if (*(p-1) != '\r') {
                goto badreq;
            }
            if (p-s-1 != 8 || !bytes_const_eq(s, 5, "HTTP/") ||
                s[5] < '0' || s[5] > '9' || s[6] != '.' ||
                s[7] < '0' || s[7] > '9')
            {
                goto badproto;
            }
            // encode major.minor as a two-digit int, e.g. 11 for HTTP/1.1
            proto = (s[5]-'0')*10+(s[7]-'0');
            if (proto < 9 || proto >= 30) {
                goto badproto;
            }
            if (proto >= 11) {
                *keepalive = true;
            }
            *httpvers = proto;
            p++;
            goto readhdrs;
        }

        p++;
    }
    goto badreq;
readhdrs:
    // Parse the headers, pulling the pairs along the way.
    while (p < e) {
        hdrname = p;
        while (p < e) {
            if (*p == ':') {
                hdrnamelen = p-hdrname;
                p++;
                while (p < e && *p == ' ') {
                    p++;
                }
                hdrval = p;
                while (p < e) {
                    if (*p == '\n') {
                        if (*(p-1) != '\r') {
                            goto badreq;
                        }
                        hdrvallen = p-hdrval-1;
                        // printf("[%.*s]=[%.*s]\n", (int)hdrnamelen, hdrname,
                        //     (int)hdrvallen, hdrval);
                        // We have a new header pair (hdrname, hdrval);
                        if (argeq_bytes(hdrname, hdrnamelen, "content-length")){
                            uint64_t x;
                            if (!parse_u64(hdrval, hdrvallen, &x) ||
                                x > MAXARGSZ)
                            {
                                stat_store_too_large_incr(0);
                                goto badreq;
                            }
                            bodylen = x;
                            nocontentlength = false;
                        } else if (argeq_bytes(hdrname, hdrnamelen,
                            "connection"))
                        {
                            *keepalive = argeq_bytes(hdrval, hdrvallen,
                                "keep-alive");
                        } else if (argeq_bytes(hdrname, hdrnamelen,
                            "accept"))
                        {
                            if (memmem(hdrval, hdrvallen, "text/html", 9) != 0){
                                html = true;
                            }
                        } else if (argeq_bytes(hdrname, hdrnamelen,
                            "authorization"))
                        {
                            authhdr = hdrval;
                            authhdrlen = hdrvallen;
                        }
                        p++;
                        // a bare CRLF after a header ends the header block
                        if (p < e && *p == '\r') {
                            p++;
                            if (p < e && *p == '\n') {
                                p++;
                            } else {
                                goto badreq;
                            }
                            goto readbody;
                        }
                        break;
                    }
                    p++;
                }
                break;
            }
            p++;
        }
    }
    return 0;
readbody:
    // read the content body
    if ((size_t)(e-p) < bodylen) {
        return 0;
    }
    const char *body = p;
    p = e;

    // check
    if (urilen == 0 || uri[0] != '/') {
        goto badreq;
    }
    // drop the leading '/'
    uri++;
    urilen--;
    const char *ex = 0;
    size_t exlen = 0;
    const char *flags = 0;
    size_t flagslen = 0;
    const char *cas = 0;
    size_t caslen = 0;
    const char *qauth = 0;
    size_t qauthlen = 0;
    bool xx = false;
    bool nx = false;
    // Parse the query string, pulling the pairs along the way.
    size_t querylen = 0;
    const char *query = memchr(uri, '?', urilen);
    if (query) {
        querylen = urilen-(query-uri);
        urilen = query-uri;
        query++;
        querylen--;
        const char *qkey;
        size_t qkeylen;
        const char *qval;
        size_t qvallen;
        size_t j = 0;   // start of the current key
        size_t k = 0;   // position of '=' (or end for bare keys)
        for (size_t i = 0; i < querylen; i++) {
            if (query[i] == '=') {
                k = i;
                i++;
                for (; i < querylen; i++) {
                    if (query[i] == '&') {
                        break;
                    }
                }
                qval = query+k+1;
                qvallen = i-k-1;
            qkeyonly:
                qkey = query+j;
                qkeylen = k-j;
                // We have a new query pair (qkey, qval);
                if (bytes_const_eq(qkey, qkeylen, "flags")) {
                    flags = qval;
                    flagslen = qvallen;
                } else if (bytes_const_eq(qkey, qkeylen, "ex") ||
                    bytes_const_eq(qkey, qkeylen, "ttl"))
                {
                    ex = qval;
                    exlen = qvallen;
                } else if (bytes_const_eq(qkey, qkeylen, "cas")) {
                    cas = qval;
                    caslen = qvallen;
                } else if (bytes_const_eq(qkey, qkeylen, "xx")) {
                    xx = true;
                } else if (bytes_const_eq(qkey, qkeylen, "nx")) {
                    nx = true;
                } else if (bytes_const_eq(qkey, qkeylen, "auth")) {
                    qauth = qval;
                    qauthlen = qvallen;
                }
                j = i+1;
            } else if (query[i] == '&' || i == querylen-1) {
                // bare key with no '=' value
                qval = 0;
                qvallen = 0;
                if (i == querylen-1) {
                    i++;
                }
                k = i;
                goto qkeyonly;
            }
        }
    }
    // The entire HTTP request is complete.
    // Turn request into valid command arguments.
    if (bytes_const_eq(method, methodlen, "GET")) {
        if (urilen > 0 && uri[0] == '@') {
            // system command such as @stats or @flushall
            goto badreq;
        } else if (urilen == 0) {
            goto showhelp;
        } else {
            if (!http_valid_key(uri, urilen)) {
                goto badkey;
            }
            args_append(args, "get", 3, true);
            args_append(args, uri, urilen, true);
        }
    } else if (bytes_const_eq(method, methodlen, "PUT")) {
        if (nocontentlength) {
            // goto badreq;
        }
        if (urilen > 0 && uri[0] == '@') {
            goto badreq;
        }
        if (!http_valid_key(uri, urilen)) {
            goto badkey;
        }
        args_append(args, "set", 3, true);
        args_append(args, uri, urilen, true);
        args_append(args, body, bodylen, true);
        if (cas) {
            args_append(args, "cas", 3, true);
            args_append(args, cas, caslen, true);
        }
        if (ex) {
            args_append(args, "ex", 2, true);
            args_append(args, ex, exlen, true);
        }
        if (flags) {
            args_append(args, "flags", 5, true);
            args_append(args, flags, flagslen, true);
        }
        if (xx) {
            args_append(args, "xx", 2, true);
        }
        if (nx) {
            args_append(args, "nx", 2, true);
        }
    } else if (bytes_const_eq(method, methodlen, "DELETE")) {
        if (urilen > 0 && uri[0] == '@') {
            goto badreq;
        }
        if (!http_valid_key(uri, urilen)) {
            goto badkey;
        }
        args_append(args, "del", 3, true);
        args_append(args, uri, urilen, true);
    } else {
        parse_seterror("Method Not Allowed");
        goto badreq;
    }

    // Check authorization
    // query-string ?auth= takes precedence over the Authorization header
    const char *authval = 0;
    size_t authvallen = 0;
    if (qauthlen > 0) {
        authval = qauth;
        authvallen = qauthlen;
    } else if (authhdrlen > 0) {
        if (authhdrlen >= 7 && strncmp(authhdr, "Bearer ", 7) == 0) {
            authval = authhdr + 7;
            authvallen = authhdrlen - 7;
        } else {
            goto unauthorized;
        }
    }
    if (useauth || authvallen > 0) {
        // NOTE(review): when useauth is false but the client supplied a
        // token, 'auth' is still dereferenced here — confirm it is always
        // non-NULL in that configuration.
        stat_auth_cmds_incr(0);
        size_t authlen = strlen(auth);
        if (authvallen != authlen || memcmp(auth, authval, authlen) != 0) {
            stat_auth_errors_incr(0);
            goto unauthorized;
        }

    }
    return e-data;
badreq:
    parse_seterror("Bad Request");
    return -1;
badproto:
    parse_seterror("Bad Request");
    return -1;
badkey:
    parse_seterror("Invalid Key");
    return -1;
unauthorized:
    parse_seterror("Unauthorized");
    return -1;
showhelp:
    if (html) {
        parse_seterror("Show Help HTML");
    } else {
        parse_seterror("Show Help TEXT");
    }
    return -1;
}
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit sys.c provides various system-level functions.\n#if __linux__\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __APPLE__\n#include \n#include \n#endif\n#include \"sys.h\"\n\nint sys_nprocs(void) {\n static atomic_int nprocsa = 0;\n int nprocs = atomic_load_explicit(&nprocsa, __ATOMIC_RELAXED);\n if (nprocs > 0) {\n return nprocs;\n }\n int logical = sysconf(_SC_NPROCESSORS_CONF);\n logical = logical < 1 ? 1 : logical;\n int physical = logical;\n int affinity = physical;\n#ifdef __linux__\n affinity = 0;\n cpu_set_t mask;\n CPU_ZERO(&mask);\n if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {\n perror(\"sched_getaffinity\");\n return 1;\n }\n for (int i = 0; i < CPU_SETSIZE; i++) {\n if (CPU_ISSET(i, &mask)) {\n affinity++;\n }\n }\n double hyper = ceil((double)logical / (double)physical);\n hyper = hyper < 1 ? 1 : hyper;\n affinity /= hyper;\n#endif\n nprocs = affinity;\n nprocs = nprocs < 1 ? 
1 : nprocs;\n atomic_store_explicit(&nprocsa, nprocs, __ATOMIC_RELAXED);\n return nprocs;\n}\n\n#ifndef __linux__\n#include \n#endif\n\nsize_t sys_memory(void) {\n size_t sysmem = 0;\n#ifdef __linux__\n FILE *f = fopen(\"/proc/meminfo\", \"rb\");\n if (f) {\n char buf[4096];\n size_t n = fread(buf, 1, sizeof(buf)-1, f);\n buf[n] = '\\0';\n char *s = 0;\n char *e = 0;\n s = strstr(buf, \"MemTotal\");\n if (s) s = strstr(s, \": \");\n if (s) e = strstr(s, \"\\n\");\n if (e) {\n *e = '\\0';\n s += 2;\n while (isspace(*s)) s++;\n if (strstr(s, \" kB\")) {\n s[strstr(s, \" kB\")-s] = '\\0';\n }\n errno = 0;\n char *end;\n int64_t isysmem = strtoll(s, &end, 10);\n assert(errno == 0 && isysmem > 0);\n isysmem *= 1024;\n sysmem = isysmem;\n }\n fclose(f);\n }\n#else\n size_t memsize = 0;\n size_t len = sizeof(memsize);\n if (sysctlbyname(\"hw.memsize\", &memsize, &len, 0, 0) == 0) {\n sysmem = memsize;\n }\n#endif\n if (sysmem == 0) {\n fprintf(stderr, \"# could not detect total system memory, bailing\\n\");\n exit(1);\n }\n return sysmem;\n}\n\nuint64_t sys_seed(void) {\n #define NSEEDCAP 64\n static __thread int nseeds = 0;\n static __thread uint64_t seeds[NSEEDCAP];\n if (nseeds == 0) {\n // Generate a group of new seeds\n FILE *f = fopen(\"/dev/urandom\", \"rb+\");\n if (!f) {\n perror(\"# /dev/urandom\");\n exit(1);\n }\n size_t n = fread(seeds, 8, NSEEDCAP, f);\n (void)n;\n assert(n == NSEEDCAP);\n fclose(f);\n nseeds = NSEEDCAP;\n }\n return seeds[--nseeds];\n}\n\nstatic int64_t nanotime(struct timespec *ts) {\n int64_t x = ts->tv_sec;\n x *= 1000000000;\n x += ts->tv_nsec;\n return x;\n}\n\n// Return monotonic nanoseconds of the CPU clock.\nint64_t sys_now(void) {\n struct timespec now = { 0 };\n#ifdef __linux__\n clock_gettime(CLOCK_BOOTTIME, &now);\n#elif defined(__APPLE__)\n clock_gettime(CLOCK_UPTIME_RAW, &now);\n#else\n clock_gettime(CLOCK_MONOTONIC, &now);\n#endif\n return nanotime(&now);\n}\n\n// Return unix timestamp in nanoseconds\nint64_t 
sys_unixnow(void) {\n struct timespec now = { 0 };\n clock_gettime(CLOCK_REALTIME, &now);\n return nanotime(&now);\n}\n\n#ifdef __APPLE__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n task_basic_info_data_t taskInfo;\n mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;\n kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,\n (task_info_t)&taskInfo, &infoCount);\n if (kr != KERN_SUCCESS) {\n fprintf(stderr, \"# task_info: %s\\n\", mach_error_string(kr));\n abort();\n }\n info->virt = taskInfo.virtual_size;\n info->rss = taskInfo.resident_size;\n}\n#elif __linux__\nvoid sys_getmeminfo(struct sys_meminfo *info) {\n FILE *f = fopen(\"/proc/self/statm\", \"r\");\n if (!f) {\n perror(\"# open /proc/self/statm\");\n abort();\n }\n unsigned long vm_pages, rss_pages;\n long x = fscanf(f, \"%lu %lu\", &vm_pages, &rss_pages);\n fclose(f);\n if (x != 2) {\n perror(\"# read /proc/self/statm\");\n abort();\n }\n\n // Get the system page size (in bytes)\n size_t page_size = sysconf(_SC_PAGESIZE);\n assert(page_size > 0);\n\n // Convert pages to bytes\n info->virt = vm_pages * page_size;\n info->rss = rss_pages * page_size;\n}\n#endif\n\n#include \n\nconst char *sys_arch(void) {\n static __thread bool got = false;\n static __thread char arch[1024] = \"unknown/error\";\n if (!got) {\n struct utsname unameData;\n if (uname(&unameData) == 0) {\n snprintf(arch, sizeof(arch), \"%s/%s\", unameData.sysname, \n unameData.machine);\n char *p = arch;\n while (*p) {\n *p = tolower(*p);\n p++;\n }\n got = true;\n }\n }\n return arch;\n}\n\nvoid sys_genuseid(char useid[16]) {\n const uint8_t chs[] = \n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789\";\n uint64_t a = sys_seed();\n uint64_t b = sys_seed();\n uint8_t bytes[16];\n memcpy(bytes, &a, 8);\n memcpy(bytes+8, &b, 8);\n for (int i = 0; i < 16; i++) {\n bytes[i] = chs[bytes[i]%62];\n }\n memcpy(useid, bytes, 16);\n}\n\n// Returns a unique thread id for the current thread.\n// This is 
an artificial generated value that is always distinct. \nuint64_t sys_threadid(void) {\n static atomic_int_fast64_t next = 0;\n static __thread uint64_t id = 0;\n if (id == 0) {\n id = atomic_fetch_add_explicit(&next, 1, __ATOMIC_RELEASE);\n }\n return id;\n}\n"], ["/pogocache/src/parse.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit parse.c provides the entrypoint for parsing all data \n// for incoming client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"parse.h\"\n#include \"util.h\"\n\n__thread char parse_lasterr[1024] = \"\";\n\nconst char *parse_lasterror(void) {\n return parse_lasterr;\n}\n\nssize_t parse_resp(const char *bytes, size_t len, struct args *args);\nssize_t parse_memcache(const char *data, size_t len, struct args *args,\n bool *noreply);\nssize_t parse_http(const char *data, size_t len, struct args *args,\n int *httpvers, bool *keepalive);\nssize_t parse_resp_telnet(const char *bytes, size_t len, struct args *args);\nssize_t parse_postgres(const char *data, size_t len, struct args *args,\n struct pg **pg);\n\nstatic bool sniff_proto(const char *data, size_t len, int *proto) {\n if (len > 0 && data[0] == '*') {\n *proto = PROTO_RESP;\n return true;\n }\n if (len > 0 && data[0] == '\\0') {\n *proto = PROTO_POSTGRES;\n return true;\n }\n // Parse the first line of text\n size_t n = 0;\n for (size_t i = 0; i < len; i++) {\n if (data[i] == '\\n') {\n n = i+1;\n break;\n }\n }\n // Look for \" HTTP/*.*\\r\\n\" suffix\n if (n >= 11 && memcmp(data+n-11, \" HTTP/\", 5) == 0 && \n data[n-4] == '.' 
&& data[n-2] == '\\r')\n {\n *proto = PROTO_HTTP;\n return true;\n }\n // Trim the prefix, Resp+Telnet and Memcache both allow for spaces between\n // arguments.\n while (*data == ' ') {\n data++;\n n--;\n len--;\n }\n // Treat all uppercase commands as Resp+Telnet\n if (n > 0 && data[0] >= 'A' && data[0] <= 'Z') {\n *proto = PROTO_RESP;\n return true;\n }\n // Look for Memcache commands\n if (n >= 1) {\n *proto = PROTO_MEMCACHE;\n return true;\n }\n // Protocol is unknown\n *proto = 0;\n return false;\n}\n\n// Returns the number of bytes read from data.\n// returns -1 on error\n// returns 0 when there isn't enough data to complete a command.\n// On success, the args and proto will be set to the command arguments and\n// protocol type, respectively.\n//\n// It's required to set proto to 0 for the first command, per client.\n// Then continue to provide the last known proto. \n// This allows for the parser to learn and predict the protocol for ambiguous\n// protocols; like Resp+Telnet, Memcache+Text, HTTP, etc.\n//\n// The noreply param is an output param that is only set when the proto is\n// memcache. The argument is stripped from the args array,\n// but made available to the caller in case it needs to be known.\n//\n// The keepalive param is an output param that is only set when the proto is\n// http. It's used to let the caller know to keep the connection alive for\n// another request.\nssize_t parse_command(const void *data, size_t len, struct args *args, \n int *proto, bool *noreply, int *httpvers, bool *keepalive, struct pg **pg)\n{\n args_clear(args);\n parse_lasterr[0] = '\\0';\n *httpvers = 0;\n *noreply = false;\n *keepalive = false;\n // Sniff for the protocol. 
This should only happen once per client, upon\n // their first request.\n if (*proto == 0) {\n if (!sniff_proto(data, len, proto)) {\n // Unknown protocol\n goto fail;\n }\n if (*proto == 0) {\n // Not enough data to determine yet\n return 0;\n }\n }\n if (*proto == PROTO_RESP) {\n const uint8_t *bytes = data;\n if (bytes[0] == '*') {\n return parse_resp(data, len, args);\n } else {\n return parse_resp_telnet(data, len, args);\n }\n } else if (*proto == PROTO_MEMCACHE) {\n return parse_memcache(data, len, args, noreply);\n } else if (*proto == PROTO_HTTP) {\n return parse_http(data, len, args, httpvers, keepalive);\n } else if (*proto == PROTO_POSTGRES) {\n return parse_postgres(data, len, args, pg);\n }\nfail:\n parse_seterror(\"ERROR\");\n return -1;\n}\n\n"], ["/pogocache/src/args.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit args.c provides functions for managing command arguments\n#include \n#include \n#include \n#include \"args.h\"\n#include \"xmalloc.h\"\n#include \"util.h\"\n\nconst char *args_at(struct args *args, int idx, size_t *len) {\n *len = args->bufs[idx].len;\n return args->bufs[idx].data;\n}\n\nint args_count(struct args *args) {\n return args->len;\n}\n\nbool args_eq(struct args *args, int index, const char *str) {\n if ((size_t)index >= args->len) {\n return false;\n }\n size_t alen = args->bufs[index].len;\n const char *arg = args->bufs[index].data;\n size_t slen = strlen(str); \n if (alen != slen) {\n return false;\n }\n for (size_t i = 0; i < slen ; i++) {\n if (tolower(str[i]) != tolower(arg[i])) {\n return false;\n }\n }\n return true;\n}\n\nvoid args_append(struct args *args, const char *data, size_t len,\n 
bool zerocopy)\n{\n#ifdef NOZEROCOPY\n zerocopy = 0;\n#endif\n if (args->len == args->cap) {\n args->cap = args->cap == 0 ? 4 : args->cap*2;\n args->bufs = xrealloc(args->bufs, args->cap * sizeof(struct buf));\n memset(&args->bufs[args->len], 0, (args->cap-args->len) * \n sizeof(struct buf));\n }\n if (zerocopy) {\n buf_clear(&args->bufs[args->len]);\n args->bufs[args->len].len = len;\n args->bufs[args->len].data = (char*)data;\n } else {\n args->bufs[args->len].len = 0;\n buf_append(&args->bufs[args->len], data, len);\n }\n if (args->len == 0) {\n args->zerocopy = zerocopy;\n } else {\n args->zerocopy = args->zerocopy && zerocopy;\n }\n args->len++;\n}\n\nvoid args_clear(struct args *args) {\n if (!args->zerocopy) {\n for (size_t i = 0 ; i < args->len; i++) {\n buf_clear(&args->bufs[i]);\n }\n }\n args->len = 0;\n}\n\nvoid args_free(struct args *args) {\n args_clear(args);\n xfree(args->bufs);\n}\n\nvoid args_print(struct args *args) {\n printf(\". \");\n for (size_t i = 0; i < args->len; i++) {\n char *buf = args->bufs[i].data;\n int len = args->bufs[i].len;\n printf(\"[\"); \n binprint(buf, len);\n printf(\"] \");\n }\n printf(\"\\n\");\n}\n\n// remove the first item\nvoid args_remove_first(struct args *args) {\n if (args->len > 0) {\n buf_clear(&args->bufs[0]);\n for (size_t i = 1; i < args->len; i++) {\n args->bufs[i-1] = args->bufs[i];\n }\n args->len--;\n }\n}\n"], ["/pogocache/src/tls.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit tls.c provides an interface for translating TLS bytes streams.\n// This is intended to be used with client connections.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tls.h\"\n#include \"xmalloc.h\"\n#include \"openssl.h\"\n\n#ifdef NOOPENSSL\n\nvoid tls_init(void) {}\nbool tls_accept(int fd, struct tls **tls_out) {\n (void)fd;\n *tls_out = 0;\n return true;\n}\nint tls_close(struct tls *tls, int fd) {\n (void)tls;\n return close(fd);\n}\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n (void)tls;\n return read(fd, data, len);\n}\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n (void)tls;\n return write(fd, data, len);\n}\n#else\n\nextern const bool usetls;\nextern const char *tlscertfile;\nextern const char *tlscacertfile;\nextern const char *tlskeyfile;\n\nstatic SSL_CTX *ctx;\n\nstruct tls {\n SSL *ssl;\n};\n\nvoid tls_init(void) {\n if (!usetls) {\n return;\n }\n ctx = SSL_CTX_new(TLS_server_method());\n if (!SSL_CTX_load_verify_locations(ctx, tlscacertfile, 0)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(1);\n }\n if (!SSL_CTX_use_certificate_file(ctx, tlscertfile , SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_use_PrivateKey_file(ctx, tlskeyfile, SSL_FILETYPE_PEM)) {\n printf(\"# Error initializing tls, details to follow...\\n\");\n ERR_print_errors_fp(stderr);\n exit(EXIT_FAILURE);\n }\n if (!SSL_CTX_check_private_key(ctx)) {\n printf(\"# tls: private key does not match the certificate\\n\");\n 
exit(EXIT_FAILURE);\n }\n}\n\nbool tls_accept(int fd, struct tls **tls_out) {\n if (!usetls) {\n // tls is disabled for all of pogocache.\n *tls_out = 0;\n return true;\n }\n SSL *ssl = SSL_new(ctx);\n if (!ssl) {\n printf(\"# tls: SSL_new() failed\\n\");\n *tls_out = 0;\n return false;\n }\n SSL_set_fd(ssl, fd);\n SSL_set_verify(ssl, SSL_VERIFY_PEER, 0);\n int ret = SSL_accept(ssl);\n if (ret <= 0) {\n int err = SSL_get_error(ssl, ret);\n if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {\n printf(\"# tls: SSL_accept() failed\\n\");\n ERR_print_errors_fp(stderr);\n SSL_free(ssl);\n *tls_out = 0;\n return false;\n }\n }\n struct tls *tls = xmalloc(sizeof(struct tls));\n memset(tls, 0, sizeof(struct tls));\n tls->ssl = ssl;\n *tls_out = tls;\n return true;\n}\n\nint tls_close(struct tls *tls, int fd) {\n if (tls) {\n if (SSL_shutdown(tls->ssl) == 0) {\n SSL_shutdown(tls->ssl);\n }\n SSL_free(tls->ssl);\n xfree(tls);\n }\n return close(fd);\n}\n\nssize_t tls_write(struct tls *tls, int fd, const void *data, size_t len) {\n if (!tls) {\n return write(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_write_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else {\n // Unreliable errno. Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\nssize_t tls_read(struct tls *tls, int fd, void *data, size_t len) {\n if (!tls) {\n return read(fd, data, len);\n }\n size_t nbytes;\n int ret = SSL_read_ex(tls->ssl, data, len, &nbytes);\n if (ret == 1) {\n return nbytes;\n }\n int err = SSL_get_error(tls->ssl, ret);\n if (err == SSL_ERROR_ZERO_RETURN) {\n return 0;\n }\n if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {\n // Non-blocking I/O, try again later\n errno = EAGAIN;\n } else { \n // Unreliable errno. 
Fallback to EIO.\n errno = EIO;\n }\n return -1;\n}\n\n#endif\n"], ["/pogocache/src/xmalloc.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit xmalloc.c is the primary allocator interface. The xmalloc/xfree\n// functions should be used instead of malloc/free.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"sys.h\"\n#include \"xmalloc.h\"\n\n#if defined(__linux__) && defined(__GLIBC__)\n#include \n#define HAS_MALLOC_H\n#endif\n\n// from main.c\nextern const int useallocator;\nextern const bool usetrackallocs;\n\n#ifdef NOTRACKALLOCS\n#define add_alloc()\n#define sub_alloc()\nsize_t xallocs(void) {\n return 0;\n}\n#else\nstatic atomic_int_fast64_t nallocs = 0;\n\nsize_t xallocs(void) {\n if (usetrackallocs) {\n return atomic_load(&nallocs);\n } else {\n return 0;\n }\n}\n\nstatic void add_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_add_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n\nstatic void sub_alloc(void) {\n if (usetrackallocs) {\n atomic_fetch_sub_explicit(&nallocs, 1, __ATOMIC_RELAXED);\n }\n}\n#endif\n\nstatic void check_ptr(void *ptr) {\n if (!ptr) {\n fprintf(stderr, \"# %s\\n\", strerror(ENOMEM));\n abort();\n }\n}\n\nvoid *xmalloc(size_t size) {\n void *ptr = malloc(size);\n check_ptr(ptr);\n add_alloc();\n return ptr;\n}\n\nvoid *xrealloc(void *ptr, size_t size) {\n if (!ptr) {\n return xmalloc(size);\n }\n ptr = realloc(ptr, size);\n check_ptr(ptr);\n return ptr;\n}\n\nvoid xfree(void *ptr) {\n if (!ptr) {\n return;\n }\n free(ptr);\n sub_alloc();\n}\n\nvoid xpurge(void) {\n#ifdef HAS_MALLOC_H\n // Releases unused heap memory to OS\n 
malloc_trim(0);\n#endif\n}\n"], ["/pogocache/src/buf.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit buf.c is a simple interface for creating byte buffers\n#include \n#include \"xmalloc.h\"\n#include \"util.h\"\n#include \"buf.h\"\n\nvoid buf_ensure(struct buf *buf, size_t len) {\n if (buf->len+len > buf->cap) {\n size_t oldcap = buf->cap;\n size_t newcap = buf->cap;\n if (oldcap == 0) {\n buf->data = 0;\n newcap = 16;\n } else {\n newcap *= 2;\n }\n while (buf->len+len > newcap) {\n newcap *= 2;\n }\n buf->data = xrealloc(buf->data, newcap);\n buf->cap = newcap;\n }\n}\n\nvoid buf_append(struct buf *buf, const void *data, size_t len){\n buf_ensure(buf, len);\n memcpy(buf->data+buf->len, data, len);\n buf->len += len;\n}\n\nvoid buf_append_byte(struct buf *buf, char byte) {\n if (buf->len < buf->cap) {\n buf->data[buf->len++] = byte;\n } else {\n buf_append(buf, &byte, 1);\n }\n}\n\nvoid buf_clear(struct buf *buf) {\n // No capacity means this buffer is owned somewhere else and we \n // must not free the data.\n if (buf->cap) {\n xfree(buf->data);\n }\n memset(buf, 0, sizeof(struct buf));\n}\n\nvoid buf_append_uvarint(struct buf *buf, uint64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_u64(buf->data+buf->len, x);\n buf->len += n;\n}\n\nvoid buf_append_varint(struct buf *buf, int64_t x) {\n buf_ensure(buf, 10);\n int n = varint_write_i64(buf->data+buf->len, x);\n buf->len += n;\n}\n"], ["/pogocache/src/stats.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. 
All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n//\n// Unit stats.c tracks various stats. Mostly for the memcache protocol.\n#include \n#include \"stats.h\"\n\nstatic atomic_uint_fast64_t g_stat_cmd_flush = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_touch = 0;\nstatic atomic_uint_fast64_t g_stat_cmd_meta = 0;\nstatic atomic_uint_fast64_t g_stat_get_expired = 0;\nstatic atomic_uint_fast64_t g_stat_get_flushed = 0;\nstatic atomic_uint_fast64_t g_stat_delete_misses = 0;\nstatic atomic_uint_fast64_t g_stat_delete_hits = 0;\nstatic atomic_uint_fast64_t g_stat_incr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_incr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_decr_misses = 0;\nstatic atomic_uint_fast64_t g_stat_decr_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_misses = 0;\nstatic atomic_uint_fast64_t g_stat_cas_hits = 0;\nstatic atomic_uint_fast64_t g_stat_cas_badval = 0;\nstatic atomic_uint_fast64_t g_stat_touch_hits = 0;\nstatic atomic_uint_fast64_t g_stat_touch_misses = 0;\nstatic atomic_uint_fast64_t g_stat_store_too_large = 0;\nstatic atomic_uint_fast64_t g_stat_store_no_memory = 0;\nstatic atomic_uint_fast64_t g_stat_auth_cmds = 0;\nstatic atomic_uint_fast64_t g_stat_auth_errors = 0;\n\nvoid stat_cmd_flush_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_flush, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_touch_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_touch, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cmd_meta_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cmd_meta, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_get_expired_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_expired, 1, __ATOMIC_RELAXED);\n}\n\nvoid 
stat_get_flushed_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_get_flushed, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_delete_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_delete_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_incr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_incr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_decr_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_decr_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_cas_badval_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_cas_badval, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_hits_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_hits, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_touch_misses_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_touch_misses, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_too_large_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_too_large, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_store_no_memory_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_store_no_memory, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_cmds_incr(struct conn *conn) {\n 
(void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_cmds, 1, __ATOMIC_RELAXED);\n}\n\nvoid stat_auth_errors_incr(struct conn *conn) {\n (void)conn;\n atomic_fetch_add_explicit(&g_stat_auth_errors, 1, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_flush(void) {\n return atomic_load_explicit(&g_stat_cmd_flush, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_touch(void) {\n return atomic_load_explicit(&g_stat_cmd_touch, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cmd_meta(void) {\n return atomic_load_explicit(&g_stat_cmd_meta, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_expired(void) {\n return atomic_load_explicit(&g_stat_get_expired, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_get_flushed(void) {\n return atomic_load_explicit(&g_stat_get_flushed, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_misses(void) {\n return atomic_load_explicit(&g_stat_delete_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_delete_hits(void) {\n return atomic_load_explicit(&g_stat_delete_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_misses(void) {\n return atomic_load_explicit(&g_stat_incr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_incr_hits(void) {\n return atomic_load_explicit(&g_stat_incr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_misses(void) {\n return atomic_load_explicit(&g_stat_decr_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_decr_hits(void) {\n return atomic_load_explicit(&g_stat_decr_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_misses(void) {\n return atomic_load_explicit(&g_stat_cas_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_hits(void) {\n return atomic_load_explicit(&g_stat_cas_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_cas_badval(void) {\n return atomic_load_explicit(&g_stat_cas_badval, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_hits(void) {\n return atomic_load_explicit(&g_stat_touch_hits, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_touch_misses(void) {\n return atomic_load_explicit(&g_stat_touch_misses, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_too_large(void) {\n 
return atomic_load_explicit(&g_stat_store_too_large, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_store_no_memory(void) {\n return atomic_load_explicit(&g_stat_store_no_memory, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_cmds(void) {\n return atomic_load_explicit(&g_stat_auth_cmds, __ATOMIC_RELAXED);\n}\n\nuint64_t stat_auth_errors(void) {\n return atomic_load_explicit(&g_stat_auth_errors, __ATOMIC_RELAXED);\n}\n\n\n"], ["/pogocache/src/uring.c", "// https://github.com/tidwall/pogocache\n//\n// Copyright 2025 Polypoint Labs, LLC. All rights reserved.\n// This file is part of the Pogocache project.\n// Use of this source code is governed by the AGPL that can be found in\n// the LICENSE file.\n//\n// For alternative licensing options or general questions, please contact\n// us at licensing@polypointlabs.com.\n#include \n#include \"uring.h\"\n\nbool uring_available(void) {\n#ifdef NOURING\n return false;\n#else\n static atomic_int available = -1;\n if (atomic_load_explicit(&available, __ATOMIC_ACQUIRE )== -1) {\n struct io_uring ring;\n if (io_uring_queue_init(1, &ring, 0) == 0) {\n io_uring_queue_exit(&ring);\n atomic_store(&available, 1);\n } else {\n atomic_store(&available, 0);\n }\n }\n return atomic_load_explicit(&available, __ATOMIC_ACQUIRE) == 1;\n#endif\n}\n"]], "task_instance_info": {"created_time": "2025-08-20 18:04:05", "created_task_model": "DeepSeek-R1", "function_skeleton": "int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) {}", "code_description": "This code segment functions to: Enable compression using a predefined external dictionary context by resetting hash tables and enforcing references to the provided dictionary data during match searches."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```c\n[TASK_BEGIN]\nint LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const ctx = 
&LZ4_dict->internal_donotuse;\n const int acceleration = 1;\n const int maxOutputSize = LZ4_compressBound(srcSize);\n const tableType_t tableType = byU32;\n\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, source, dest, srcSize,\n NULL, maxOutputSize, notLimited, tableType,\n usingExtDict, noDictIssue, acceleration);\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 18-05-03"}, "editdistance_info": {"edit_distance": 41.0959, "calculate_time": "2025-08-20 18:05:03", "true_code_clean": "int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;\n int result;\n LZ4_renormDictT(streamPtr, srcSize);\n if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);\n } else {\n result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);\n }\n streamPtr->dictionary = (const BYTE*)source;\n streamPtr->dictSize = (U32)srcSize;\n return result;\n}", "predict_code_clean": "int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)\n{\n LZ4_stream_t_internal* const ctx = &LZ4_dict->internal_donotuse;\n const int acceleration = 1;\n const int maxOutputSize = LZ4_compressBound(srcSize);\n const tableType_t tableType = byU32;\n LZ4_prepareTable(ctx, srcSize, tableType);\n return LZ4_compress_generic(ctx, source, dest, srcSize,\n NULL, maxOutputSize, notLimited, tableType,\n usingExtDict, noDictIssue, acceleration);\n}"}}