ADAPT-Chase commited on
Commit
b35e045
·
verified ·
1 Parent(s): 10ef3f1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ckh.h +101 -0
  3. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/counter.h +34 -0
  4. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ctl.h +159 -0
  5. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/decay.h +186 -0
  6. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/div.h +41 -0
  7. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ecache.h +55 -0
  8. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/edata.h +698 -0
  9. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/edata_cache.h +49 -0
  10. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ehooks.h +412 -0
  11. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/emap.h +357 -0
  12. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/emitter.h +510 -0
  13. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/eset.h +77 -0
  14. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/exp_grow.h +50 -0
  15. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent.h +137 -0
  16. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent_dss.h +26 -0
  17. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent_mmap.h +10 -0
  18. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/fb.h +373 -0
  19. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/fxp.h +126 -0
  20. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hash.h +320 -0
  21. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hook.h +163 -0
  22. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa.h +182 -0
  23. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa_hooks.h +17 -0
  24. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa_opts.h +74 -0
  25. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpdata.h +413 -0
  26. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/inspect.h +40 -0
  27. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h +108 -0
  28. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in +427 -0
  29. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h +75 -0
  30. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h +84 -0
  31. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h +122 -0
  32. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h +103 -0
  33. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +391 -0
  34. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h +111 -0
  35. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h +130 -0
  36. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in +263 -0
  37. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/large_externs.h +24 -0
  38. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/lockedint.h +204 -0
  39. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/log.h +115 -0
  40. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/malloc_io.h +105 -0
  41. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mpsc_queue.h +134 -0
  42. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mutex.h +319 -0
  43. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mutex_prof.h +117 -0
  44. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/nstime.h +73 -0
  45. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pa.h +243 -0
  46. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pac.h +179 -0
  47. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pages.h +119 -0
  48. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pai.h +95 -0
  49. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/peak.h +37 -0
  50. platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/peak_event.h +24 -0
.gitattributes CHANGED
@@ -2757,3 +2757,6 @@ platform/dbops/binaries/redis/src/deps/hiredis/async.o filter=lfs diff=lfs merge
2757
  platform/dbops/binaries/redis/src/deps/hiredis/hiredis.o filter=lfs diff=lfs merge=lfs -text
2758
  platform/dbops/binaries/redis/src/deps/hiredis/sds.o filter=lfs diff=lfs merge=lfs -text
2759
  platform/dbops/binaries/redis/src/deps/hiredis/libhiredis.a filter=lfs diff=lfs merge=lfs -text
 
 
 
 
2757
  platform/dbops/binaries/redis/src/deps/hiredis/hiredis.o filter=lfs diff=lfs merge=lfs -text
2758
  platform/dbops/binaries/redis/src/deps/hiredis/sds.o filter=lfs diff=lfs merge=lfs -text
2759
  platform/dbops/binaries/redis/src/deps/hiredis/libhiredis.a filter=lfs diff=lfs merge=lfs -text
2760
+ platform/dbops/binaries/redis/src/deps/lua/src/liblua.a filter=lfs diff=lfs merge=lfs -text
2761
+ platform/dbops/binaries/redis/src/deps/lua/src/lua filter=lfs diff=lfs merge=lfs -text
2762
+ platform/dbops/binaries/redis/src/deps/lua/src/luac filter=lfs diff=lfs merge=lfs -text
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ckh.h ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_CKH_H
2
+ #define JEMALLOC_INTERNAL_CKH_H
3
+
4
+ #include "jemalloc/internal/tsd.h"
5
+
6
+ /* Cuckoo hashing implementation. Skip to the end for the interface. */
7
+
8
+ /******************************************************************************/
9
+ /* INTERNAL DEFINITIONS -- IGNORE */
10
+ /******************************************************************************/
11
+
12
+ /* Maintain counters used to get an idea of performance. */
13
+ /* #define CKH_COUNT */
14
+ /* Print counter values in ckh_delete() (requires CKH_COUNT). */
15
+ /* #define CKH_VERBOSE */
16
+
17
+ /*
18
+ * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
19
+ * one bucket per L1 cache line.
20
+ */
21
+ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
22
+
23
+ /* Typedefs to allow easy function pointer passing. */
24
+ typedef void ckh_hash_t (const void *, size_t[2]);
25
+ typedef bool ckh_keycomp_t (const void *, const void *);
26
+
27
+ /* Hash table cell. */
28
+ typedef struct {
29
+ const void *key;
30
+ const void *data;
31
+ } ckhc_t;
32
+
33
+ /* The hash table itself. */
34
+ typedef struct {
35
+ #ifdef CKH_COUNT
36
+ /* Counters used to get an idea of performance. */
37
+ uint64_t ngrows;
38
+ uint64_t nshrinks;
39
+ uint64_t nshrinkfails;
40
+ uint64_t ninserts;
41
+ uint64_t nrelocs;
42
+ #endif
43
+
44
+ /* Used for pseudo-random number generation. */
45
+ uint64_t prng_state;
46
+
47
+ /* Total number of items. */
48
+ size_t count;
49
+
50
+ /*
51
+ * Minimum and current number of hash table buckets. There are
52
+ * 2^LG_CKH_BUCKET_CELLS cells per bucket.
53
+ */
54
+ unsigned lg_minbuckets;
55
+ unsigned lg_curbuckets;
56
+
57
+ /* Hash and comparison functions. */
58
+ ckh_hash_t *hash;
59
+ ckh_keycomp_t *keycomp;
60
+
61
+ /* Hash table with 2^lg_curbuckets buckets. */
62
+ ckhc_t *tab;
63
+ } ckh_t;
64
+
65
+ /******************************************************************************/
66
+ /* BEGIN PUBLIC API */
67
+ /******************************************************************************/
68
+
69
+ /* Lifetime management. Minitems is the initial capacity. */
70
+ bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
71
+ ckh_keycomp_t *keycomp);
72
+ void ckh_delete(tsd_t *tsd, ckh_t *ckh);
73
+
74
+ /* Get the number of elements in the set. */
75
+ size_t ckh_count(ckh_t *ckh);
76
+
77
+ /*
78
+ * To iterate over the elements in the table, initialize *tabind to 0 and call
79
+ * this function until it returns true. Each call that returns false will
80
+ * update *key and *data to the next element in the table, assuming the pointers
81
+ * are non-NULL.
82
+ */
83
+ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
84
+
85
+ /*
86
+ * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
87
+ * ckh_search, key or data can be NULL. The hash-table only stores pointers to
88
+ * the key and value, and doesn't do any lifetime management.
89
+ */
90
+ bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
91
+ bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
92
+ void **data);
93
+ bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
94
+
95
+ /* Some useful hash and comparison functions for strings and pointers. */
96
+ void ckh_string_hash(const void *key, size_t r_hash[2]);
97
+ bool ckh_string_keycomp(const void *k1, const void *k2);
98
+ void ckh_pointer_hash(const void *key, size_t r_hash[2]);
99
+ bool ckh_pointer_keycomp(const void *k1, const void *k2);
100
+
101
+ #endif /* JEMALLOC_INTERNAL_CKH_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/counter.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_COUNTER_H
2
+ #define JEMALLOC_INTERNAL_COUNTER_H
3
+
4
+ #include "jemalloc/internal/mutex.h"
5
+
6
+ typedef struct counter_accum_s {
7
+ LOCKEDINT_MTX_DECLARE(mtx)
8
+ locked_u64_t accumbytes;
9
+ uint64_t interval;
10
+ } counter_accum_t;
11
+
12
+ JEMALLOC_ALWAYS_INLINE bool
13
+ counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
14
+ uint64_t interval = counter->interval;
15
+ assert(interval > 0);
16
+ LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
17
+ /*
18
+ * If the event moves fast enough (and/or if the event handling is slow
19
+ * enough), extreme overflow can cause counter trigger coalescing.
20
+ * This is an intentional mechanism that avoids rate-limiting
21
+ * allocation.
22
+ */
23
+ bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
24
+ &counter->accumbytes, bytes, interval);
25
+ LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
26
+ return overflow;
27
+ }
28
+
29
+ bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
30
+ void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
31
+ void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
32
+ void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
33
+
34
+ #endif /* JEMALLOC_INTERNAL_COUNTER_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ctl.h ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_CTL_H
2
+ #define JEMALLOC_INTERNAL_CTL_H
3
+
4
+ #include "jemalloc/internal/jemalloc_internal_types.h"
5
+ #include "jemalloc/internal/malloc_io.h"
6
+ #include "jemalloc/internal/mutex_prof.h"
7
+ #include "jemalloc/internal/ql.h"
8
+ #include "jemalloc/internal/sc.h"
9
+ #include "jemalloc/internal/stats.h"
10
+
11
+ /* Maximum ctl tree depth. */
12
+ #define CTL_MAX_DEPTH 7
13
+
14
+ typedef struct ctl_node_s {
15
+ bool named;
16
+ } ctl_node_t;
17
+
18
+ typedef struct ctl_named_node_s {
19
+ ctl_node_t node;
20
+ const char *name;
21
+ /* If (nchildren == 0), this is a terminal node. */
22
+ size_t nchildren;
23
+ const ctl_node_t *children;
24
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
25
+ size_t);
26
+ } ctl_named_node_t;
27
+
28
+ typedef struct ctl_indexed_node_s {
29
+ struct ctl_node_s node;
30
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
31
+ size_t);
32
+ } ctl_indexed_node_t;
33
+
34
+ typedef struct ctl_arena_stats_s {
35
+ arena_stats_t astats;
36
+
37
+ /* Aggregate stats for small size classes, based on bin stats. */
38
+ size_t allocated_small;
39
+ uint64_t nmalloc_small;
40
+ uint64_t ndalloc_small;
41
+ uint64_t nrequests_small;
42
+ uint64_t nfills_small;
43
+ uint64_t nflushes_small;
44
+
45
+ bin_stats_data_t bstats[SC_NBINS];
46
+ arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
47
+ pac_estats_t estats[SC_NPSIZES];
48
+ hpa_shard_stats_t hpastats;
49
+ sec_stats_t secstats;
50
+ } ctl_arena_stats_t;
51
+
52
+ typedef struct ctl_stats_s {
53
+ size_t allocated;
54
+ size_t active;
55
+ size_t metadata;
56
+ size_t metadata_thp;
57
+ size_t resident;
58
+ size_t mapped;
59
+ size_t retained;
60
+
61
+ background_thread_stats_t background_thread;
62
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
63
+ } ctl_stats_t;
64
+
65
+ typedef struct ctl_arena_s ctl_arena_t;
66
+ struct ctl_arena_s {
67
+ unsigned arena_ind;
68
+ bool initialized;
69
+ ql_elm(ctl_arena_t) destroyed_link;
70
+
71
+ /* Basic stats, supported even if !config_stats. */
72
+ unsigned nthreads;
73
+ const char *dss;
74
+ ssize_t dirty_decay_ms;
75
+ ssize_t muzzy_decay_ms;
76
+ size_t pactive;
77
+ size_t pdirty;
78
+ size_t pmuzzy;
79
+
80
+ /* NULL if !config_stats. */
81
+ ctl_arena_stats_t *astats;
82
+ };
83
+
84
+ typedef struct ctl_arenas_s {
85
+ uint64_t epoch;
86
+ unsigned narenas;
87
+ ql_head(ctl_arena_t) destroyed;
88
+
89
+ /*
90
+ * Element 0 corresponds to merged stats for extant arenas (accessed via
91
+ * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
92
+ * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
93
+ * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
94
+ */
95
+ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
96
+ } ctl_arenas_t;
97
+
98
+ int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
99
+ void *newp, size_t newlen);
100
+ int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
101
+ int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
102
+ size_t *oldlenp, void *newp, size_t newlen);
103
+ int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
104
+ size_t *miblenp);
105
+ int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
106
+ size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
107
+ bool ctl_boot(void);
108
+ void ctl_prefork(tsdn_t *tsdn);
109
+ void ctl_postfork_parent(tsdn_t *tsdn);
110
+ void ctl_postfork_child(tsdn_t *tsdn);
111
+ void ctl_mtx_assert_held(tsdn_t *tsdn);
112
+
113
+ #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
114
+ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
115
+ != 0) { \
116
+ malloc_printf( \
117
+ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
118
+ name); \
119
+ abort(); \
120
+ } \
121
+ } while (0)
122
+
123
+ #define xmallctlnametomib(name, mibp, miblenp) do { \
124
+ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
125
+ malloc_printf("<jemalloc>: Failure in " \
126
+ "xmallctlnametomib(\"%s\", ...)\n", name); \
127
+ abort(); \
128
+ } \
129
+ } while (0)
130
+
131
+ #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
132
+ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
133
+ newlen) != 0) { \
134
+ malloc_write( \
135
+ "<jemalloc>: Failure in xmallctlbymib()\n"); \
136
+ abort(); \
137
+ } \
138
+ } while (0)
139
+
140
+ #define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
141
+ if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
142
+ != 0) { \
143
+ malloc_write( \
144
+ "<jemalloc>: Failure in ctl_mibnametomib()\n"); \
145
+ abort(); \
146
+ } \
147
+ } while (0)
148
+
149
+ #define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
150
+ newp, newlen) do { \
151
+ if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
152
+ oldp, oldlenp, newp, newlen) != 0) { \
153
+ malloc_write( \
154
+ "<jemalloc>: Failure in ctl_bymibname()\n"); \
155
+ abort(); \
156
+ } \
157
+ } while (0)
158
+
159
+ #endif /* JEMALLOC_INTERNAL_CTL_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/decay.h ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_DECAY_H
2
+ #define JEMALLOC_INTERNAL_DECAY_H
3
+
4
+ #include "jemalloc/internal/smoothstep.h"
5
+
6
+ #define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
7
+
8
+ /*
9
+ * The decay_t computes the number of pages we should purge at any given time.
10
+ * Page allocators inform a decay object when pages enter a decay-able state
11
+ * (i.e. dirty or muzzy), and query it to determine how many pages should be
12
+ * purged at any given time.
13
+ *
14
+ * This is mostly a single-threaded data structure and doesn't care about
15
+ * synchronization at all; it's the caller's responsibility to manage their
16
+ * synchronization on their own. There are two exceptions:
17
+ * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
18
+ * 2) The mtx and purging fields live (and are initialized) here, but are
19
+ * logically owned by the page allocator. This is just a convenience (since
20
+ * those fields would be duplicated for both the dirty and muzzy states
21
+ * otherwise).
22
+ */
23
+ typedef struct decay_s decay_t;
24
+ struct decay_s {
25
+ /* Synchronizes all non-atomic fields. */
26
+ malloc_mutex_t mtx;
27
+ /*
28
+ * True if a thread is currently purging the extents associated with
29
+ * this decay structure.
30
+ */
31
+ bool purging;
32
+ /*
33
+ * Approximate time in milliseconds from the creation of a set of unused
34
+ * dirty pages until an equivalent set of unused dirty pages is purged
35
+ * and/or reused.
36
+ */
37
+ atomic_zd_t time_ms;
38
+ /* time / SMOOTHSTEP_NSTEPS. */
39
+ nstime_t interval;
40
+ /*
41
+ * Time at which the current decay interval logically started. We do
42
+ * not actually advance to a new epoch until sometime after it starts
43
+ * because of scheduling and computation delays, and it is even possible
44
+ * to completely skip epochs. In all cases, during epoch advancement we
45
+ * merge all relevant activity into the most recently recorded epoch.
46
+ */
47
+ nstime_t epoch;
48
+ /* Deadline randomness generator. */
49
+ uint64_t jitter_state;
50
+ /*
51
+ * Deadline for current epoch. This is the sum of interval and per
52
+ * epoch jitter which is a uniform random variable in [0..interval).
53
+ * Epochs always advance by precise multiples of interval, but we
54
+ * randomize the deadline to reduce the likelihood of arenas purging in
55
+ * lockstep.
56
+ */
57
+ nstime_t deadline;
58
+ /*
59
+ * The number of pages we cap ourselves at in the current epoch, per
60
+ * decay policies. Updated on an epoch change. After an epoch change,
61
+ * the caller should take steps to try to purge down to this amount.
62
+ */
63
+ size_t npages_limit;
64
+ /*
65
+ * Number of unpurged pages at beginning of current epoch. During epoch
66
+ * advancement we use the delta between arena->decay_*.nunpurged and
67
+ * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
68
+ * if any, were generated.
69
+ */
70
+ size_t nunpurged;
71
+ /*
72
+ * Trailing log of how many unused dirty pages were generated during
73
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
74
+ * element is the most recent epoch. Corresponding epoch times are
75
+ * relative to epoch.
76
+ *
77
+ * Updated only on epoch advance, triggered by
78
+ * decay_maybe_advance_epoch, below.
79
+ */
80
+ size_t backlog[SMOOTHSTEP_NSTEPS];
81
+
82
+ /* Peak number of pages in associated extents. Used for debug only. */
83
+ uint64_t ceil_npages;
84
+ };
85
+
86
+ /*
87
+ * The current decay time setting. This is the only public access to a decay_t
88
+ * that's allowed without holding mtx.
89
+ */
90
+ static inline ssize_t
91
+ decay_ms_read(const decay_t *decay) {
92
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
93
+ }
94
+
95
+ /*
96
+ * See the comment on the struct field -- the limit on pages we should allow in
97
+ * this decay state this epoch.
98
+ */
99
+ static inline size_t
100
+ decay_npages_limit_get(const decay_t *decay) {
101
+ return decay->npages_limit;
102
+ }
103
+
104
+ /* How many unused dirty pages were generated during the last epoch. */
105
+ static inline size_t
106
+ decay_epoch_npages_delta(const decay_t *decay) {
107
+ return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
108
+ }
109
+
110
+ /*
111
+ * Current epoch duration, in nanoseconds. Given that new epochs are started
112
+ * somewhat haphazardly, this is not necessarily exactly the time between any
113
+ * two calls to decay_maybe_advance_epoch; see the comments on fields in the
114
+ * decay_t.
115
+ */
116
+ static inline uint64_t
117
+ decay_epoch_duration_ns(const decay_t *decay) {
118
+ return nstime_ns(&decay->interval);
119
+ }
120
+
121
+ static inline bool
122
+ decay_immediately(const decay_t *decay) {
123
+ ssize_t decay_ms = decay_ms_read(decay);
124
+ return decay_ms == 0;
125
+ }
126
+
127
+ static inline bool
128
+ decay_disabled(const decay_t *decay) {
129
+ ssize_t decay_ms = decay_ms_read(decay);
130
+ return decay_ms < 0;
131
+ }
132
+
133
+ /* Returns true if decay is enabled and done gradually. */
134
+ static inline bool
135
+ decay_gradually(const decay_t *decay) {
136
+ ssize_t decay_ms = decay_ms_read(decay);
137
+ return decay_ms > 0;
138
+ }
139
+
140
+ /*
141
+ * Returns true if the passed in decay time setting is valid.
142
+ * < -1 : invalid
143
+ * -1 : never decay
144
+ * 0 : decay immediately
145
+ * > 0 : some positive decay time, up to a maximum allowed value of
146
+ * NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
147
+ * 27th century. By that time, we expect to have implemented alternate purging
148
+ * strategies.
149
+ */
150
+ bool decay_ms_valid(ssize_t decay_ms);
151
+
152
+ /*
153
+ * As a precondition, the decay_t must be zeroed out (as if with memset).
154
+ *
155
+ * Returns true on error.
156
+ */
157
+ bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
158
+
159
+ /*
160
+ * Given an already-initialized decay_t, reinitialize it with the given decay
161
+ * time. The decay_t must have previously been initialized (and should not then
162
+ * be zeroed).
163
+ */
164
+ void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
165
+
166
+ /*
167
+ * Compute how many of 'npages_new' pages we would need to purge in 'time'.
168
+ */
169
+ uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
170
+ size_t npages_new);
171
+
172
+ /* Returns true if the epoch advanced and there are pages to purge. */
173
+ bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
174
+ size_t current_npages);
175
+
176
+ /*
177
+ * Calculates wait time until a number of pages in the interval
178
+ * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
179
+ *
180
+ * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
181
+ * indefinite wait.
182
+ */
183
+ uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
184
+ uint64_t npages_threshold);
185
+
186
+ #endif /* JEMALLOC_INTERNAL_DECAY_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/div.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_DIV_H
2
+ #define JEMALLOC_INTERNAL_DIV_H
3
+
4
+ #include "jemalloc/internal/assert.h"
5
+
6
+ /*
7
+ * This module does the division that computes the index of a region in a slab,
8
+ * given its offset relative to the base.
9
+ * That is, given a divisor d, an n = i * d (all integers), we'll return i.
10
+ * We do some pre-computation to do this more quickly than a CPU division
11
+ * instruction.
12
+ * We bound n < 2^32, and don't support dividing by one.
13
+ */
14
+
15
+ typedef struct div_info_s div_info_t;
16
+ struct div_info_s {
17
+ uint32_t magic;
18
+ #ifdef JEMALLOC_DEBUG
19
+ size_t d;
20
+ #endif
21
+ };
22
+
23
+ void div_init(div_info_t *div_info, size_t divisor);
24
+
25
+ static inline size_t
26
+ div_compute(div_info_t *div_info, size_t n) {
27
+ assert(n <= (uint32_t)-1);
28
+ /*
29
+ * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
30
+ * the compilers I tried were all smart enough to turn this into the
31
+ * appropriate "get the high 32 bits of the result of a multiply" (e.g.
32
+ * mul; mov edx eax; on x86, umull on arm, etc.).
33
+ */
34
+ size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
35
+ #ifdef JEMALLOC_DEBUG
36
+ assert(i * div_info->d == n);
37
+ #endif
38
+ return i;
39
+ }
40
+
41
+ #endif /* JEMALLOC_INTERNAL_DIV_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ecache.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_ECACHE_H
2
+ #define JEMALLOC_INTERNAL_ECACHE_H
3
+
4
+ #include "jemalloc/internal/eset.h"
5
+ #include "jemalloc/internal/san.h"
6
+ #include "jemalloc/internal/mutex.h"
7
+
8
+ typedef struct ecache_s ecache_t;
9
+ struct ecache_s {
10
+ malloc_mutex_t mtx;
11
+ eset_t eset;
12
+ eset_t guarded_eset;
13
+ /* All stored extents must be in the same state. */
14
+ extent_state_t state;
15
+ /* The index of the ehooks the ecache is associated with. */
16
+ unsigned ind;
17
+ /*
18
+ * If true, delay coalescing until eviction; otherwise coalesce during
19
+ * deallocation.
20
+ */
21
+ bool delay_coalesce;
22
+ };
23
+
24
+ static inline size_t
25
+ ecache_npages_get(ecache_t *ecache) {
26
+ return eset_npages_get(&ecache->eset) +
27
+ eset_npages_get(&ecache->guarded_eset);
28
+ }
29
+
30
+ /* Get the number of extents in the given page size index. */
31
+ static inline size_t
32
+ ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
33
+ return eset_nextents_get(&ecache->eset, ind) +
34
+ eset_nextents_get(&ecache->guarded_eset, ind);
35
+ }
36
+
37
+ /* Get the sum total bytes of the extents in the given page size index. */
38
+ static inline size_t
39
+ ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
40
+ return eset_nbytes_get(&ecache->eset, ind) +
41
+ eset_nbytes_get(&ecache->guarded_eset, ind);
42
+ }
43
+
44
+ static inline unsigned
45
+ ecache_ind_get(ecache_t *ecache) {
46
+ return ecache->ind;
47
+ }
48
+
49
+ bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
50
+ unsigned ind, bool delay_coalesce);
51
+ void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
52
+ void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
53
+ void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
54
+
55
+ #endif /* JEMALLOC_INTERNAL_ECACHE_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/edata.h ADDED
@@ -0,0 +1,698 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EDATA_H
2
+ #define JEMALLOC_INTERNAL_EDATA_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/bin_info.h"
6
+ #include "jemalloc/internal/bit_util.h"
7
+ #include "jemalloc/internal/hpdata.h"
8
+ #include "jemalloc/internal/nstime.h"
9
+ #include "jemalloc/internal/ph.h"
10
+ #include "jemalloc/internal/ql.h"
11
+ #include "jemalloc/internal/sc.h"
12
+ #include "jemalloc/internal/slab_data.h"
13
+ #include "jemalloc/internal/sz.h"
14
+ #include "jemalloc/internal/typed_list.h"
15
+
16
+ /*
17
+ * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
18
+ * to free up the low bits in the rtree leaf.
19
+ */
20
+ #define EDATA_ALIGNMENT 128
21
+
22
+ enum extent_state_e {
23
+ extent_state_active = 0,
24
+ extent_state_dirty = 1,
25
+ extent_state_muzzy = 2,
26
+ extent_state_retained = 3,
27
+ extent_state_transition = 4, /* States below are intermediate. */
28
+ extent_state_merging = 5,
29
+ extent_state_max = 5 /* Sanity checking only. */
30
+ };
31
+ typedef enum extent_state_e extent_state_t;
32
+
33
+ enum extent_head_state_e {
34
+ EXTENT_NOT_HEAD,
35
+ EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
36
+ };
37
+ typedef enum extent_head_state_e extent_head_state_t;
38
+
39
+ /*
40
+ * Which implementation of the page allocator interface, (PAI, defined in
41
+ * pai.h) owns the given extent?
42
+ */
43
+ enum extent_pai_e {
44
+ EXTENT_PAI_PAC = 0,
45
+ EXTENT_PAI_HPA = 1
46
+ };
47
+ typedef enum extent_pai_e extent_pai_t;
48
+
49
+ struct e_prof_info_s {
50
+ /* Time when this was allocated. */
51
+ nstime_t e_prof_alloc_time;
52
+ /* Allocation request size. */
53
+ size_t e_prof_alloc_size;
54
+ /* Points to a prof_tctx_t. */
55
+ atomic_p_t e_prof_tctx;
56
+ /*
57
+ * Points to a prof_recent_t for the allocation; NULL
58
+ * means the recent allocation record no longer exists.
59
+ * Protected by prof_recent_alloc_mtx.
60
+ */
61
+ atomic_p_t e_prof_recent_alloc;
62
+ };
63
+ typedef struct e_prof_info_s e_prof_info_t;
64
+
65
/*
 * The information about a particular edata that lives in an emap.  Space is
 * more precious there (the information, plus the edata pointer, has to live in
 * a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access.  You have one fewer pointer hop, since finding the
 *   edata_t associated with an item always requires accessing the rtree leaf
 *   in which this data is stored.
 * - It can be read unsynchronized, and without worrying about lifetime issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
	bool slab;
	szind_t szind;
};
81
+
82
/*
 * A compact (serial number, address) key used to order edata_t objects without
 * touching the full structure; see edata_cmp_summary_comp().
 */
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
	uint64_t sn;
	uintptr_t addr;
};
87
+
88
/* Extent (span of pages).  Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * p: pai
	 * z: zeroed
	 * g: guarded
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * h: is_head
	 *
	 * 00000000 ... 000hssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * pai: The pai flag is an extent_pai_t.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * guarded: The guarded flag is used by the sanitizer to track whether
	 *          the extent has page guards around it.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * bin_shard: the shard of the bin from which this extent came.
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EDATA_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT  0
#define EDATA_BITS_ARENA_MASK  MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH  1
#define EDATA_BITS_SLAB_SHIFT  (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK  MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH  1
#define EDATA_BITS_COMMITTED_SHIFT  (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK  MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH  1
#define EDATA_BITS_PAI_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK  MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH  1
#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_GUARDED_WIDTH  1
#define EDATA_BITS_GUARDED_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK  MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)

#define EDATA_BITS_STATE_WIDTH  3
#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK  MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT  (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK  MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH  (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT  (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK  MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH  6
#define EDATA_BITS_BINSHARD_SHIFT  (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK  MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT  (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK  MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
	void *e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different than the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t e_size_esn;
#define EDATA_SIZE_MASK  ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK  ((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t e_bsize;
	};

	/*
	 * If this edata is a user allocation from an HPA, it comes out of some
	 * pageslab (we don't yet support hugepage allocations that don't fit
	 * into pageslabs).  This tracks it.
	 */
	hpdata_t *e_ps;

	/*
	 * Serial number.  These are not necessarily unique; splitting an extent
	 * results in two extents with the same serial number.
	 */
	uint64_t e_sn;

	union {
		/*
		 * List linkage used when the edata_t is active; either in
		 * arena's large allocations or bin_t's slabs_full.
		 */
		ql_elm(edata_t) ql_link_active;
		/*
		 * Pairing heap linkage.  Used whenever the extent is inactive
		 * (in the page allocators), or when it is active and in
		 * slabs_nonfull, or when the edata_t is unassociated with an
		 * extent and sitting in an edata_cache.
		 */
		union {
			edata_heap_link_t heap_link;
			edata_avail_link_t avail_link;
		};
	};

	union {
		/*
		 * List linkage used when the extent is inactive:
		 * - Stashed dirty extents
		 * - Ecache LRU functionality.
		 */
		ql_elm(edata_t) ql_link_inactive;
		/* Small region slab metadata. */
		slab_data_t e_slab_data;

		/* Profiling data, used for large objects. */
		e_prof_info_t e_prof_info;
	};
};
252
+
253
/* Typed list wrappers over the two ql linkage fields above. */
TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
255
+
256
/*
 * Getters for the packed e_bits fields.  Each extracts (bits & MASK) >> SHIFT
 * per the layout documented on struct edata_s.
 */
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
	unsigned arena_ind = (unsigned)((edata->e_bits &
	    EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
	assert(arena_ind < MALLOCX_ARENA_LIMIT);

	return arena_ind;
}

/* May return SC_NSIZES, the sentinel meaning "no valid size class". */
static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
	szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
	    EDATA_BITS_SZIND_SHIFT);
	assert(szind <= SC_NSIZES);
	return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
	szind_t szind = edata_szind_get_maybe_invalid(edata);
	assert(szind < SC_NSIZES); /* Never call when "invalid". */
	return szind;
}

/* Usable size implied by the size class index. */
static inline size_t
edata_usize_get(const edata_t *edata) {
	return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
	unsigned binshard = (unsigned)((edata->e_bits &
	    EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	return binshard;
}

static inline uint64_t
edata_sn_get(const edata_t *edata) {
	return edata->e_sn;
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
	return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
	    EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_guarded_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
	    EDATA_BITS_GUARDED_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
	    EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
	    EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
	    EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
	    EDATA_BITS_SLAB_SHIFT);
}

/* Only meaningful for slabs; asserts the slab flag. */
static inline unsigned
edata_nfree_get(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
	    EDATA_BITS_NFREE_SHIFT);
}
340
+
341
/* Page-aligned base of the extent; slabs must already be page-aligned. */
static inline void *
edata_base_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return PAGE_ADDR2BASE(edata->e_addr);
}

/* Raw address; may differ from base for non-slab extents. */
static inline void *
edata_addr_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return edata->e_addr;
}

/* Extent size (high bits of the packed size/esn word). */
static inline size_t
edata_size_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_SIZE_MASK);
}

/* Extent serial number (low bits of the packed size/esn word). */
static inline size_t
edata_esn_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_ESN_MASK);
}

/* Base size; valid only for edatas initialized via edata_binit(). */
static inline size_t
edata_bsize_get(const edata_t *edata) {
	return edata->e_bsize;
}

/* Owning pageslab; only valid for HPA-owned extents. */
static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	return edata->e_ps;
}

/* Address of the page immediately before the extent. */
static inline void *
edata_before_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}

/* Address of the last page within the extent. */
static inline void *
edata_last_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata) - PAGE);
}

/* One-past-the-end address of the extent. */
static inline void *
edata_past_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata));
}
392
+
393
/* Slab metadata; the union member is only live when the slab flag is set. */
static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

/* Acquire-load pairs with the release-store in edata_prof_tctx_set(). */
static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
	return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
	    ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
	return &edata->e_prof_info.e_prof_alloc_time;
}

static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
	return edata->e_prof_info.e_prof_alloc_size;
}

/*
 * Relaxed load; per the field comment, synchronization is provided externally
 * by prof_recent_alloc_mtx — hence "dont_call_directly".
 */
static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
	return (prof_recent_t *)atomic_load_p(
	    &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}
426
+
427
/*
 * Setters for the packed e_bits fields.  Each clears the field's mask and ORs
 * in the shifted new value; none of these are atomic.
 */
static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
	    ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
	edata->e_addr = addr;
}

/* Size must be page-aligned; the low bits hold the esn. */
static inline void
edata_size_set(edata_t *edata, size_t size) {
	assert((size & ~EDATA_SIZE_MASK) == 0);
	edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
	edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
	    EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
	edata->e_bsize = bsize;
}

/* Only legal for HPA-owned extents; set pai first. */
static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	edata->e_ps = ps;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
	assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
	    ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
	assert(edata_slab_get(edata));
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

/* Sets nfree and binshard with a single read-modify-write of e_bits. */
static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits &
	    (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

/*
 * nfree occupies a contiguous bit range, so adding/subtracting a shifted 1
 * adjusts only that field (assuming no overflow out of the field's width).
 */
static inline void
edata_nfree_inc(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
	assert(edata_slab_get(edata));
	edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}
510
+
511
static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
	edata->e_sn = sn;
}

static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
	    ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
	    ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
	    ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

/* Release-store pairs with the acquire-load in edata_prof_tctx_get(). */
static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
	atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
	nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
	edata->e_prof_info.e_prof_alloc_size = size;
}

/* Relaxed store; external sync via prof_recent_alloc_mtx (see field docs). */
static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
	atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
	    ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
	    EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
	    ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

/*
 * True for the transient states (extent_state_transition and above); relies
 * on the intermediate states being the numerically largest enum values.
 */
static inline bool
edata_state_in_transition(extent_state_t state) {
	return state >= extent_state_transition;
}
590
+
591
/*
 * Because this function is implemented as a sequence of bitfield modifications,
 * even though each individual bit is properly initialized, we technically read
 * uninitialized data within it.  This is mostly fine, since most callers get
 * their edatas from zeroing sources, but callers who make stack edata_ts need
 * to manually zero them.
 *
 * Initializes every e_bits field plus addr/size/sn; the guarded flag always
 * starts false, and the prof tctx is cleared when profiling is compiled in.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
	/* Slab extents must be page-aligned. */
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	edata_arena_ind_set(edata, arena_ind);
	edata_addr_set(edata, addr);
	edata_size_set(edata, size);
	edata_slab_set(edata, slab);
	edata_szind_set(edata, szind);
	edata_sn_set(edata, sn);
	edata_state_set(edata, state);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, zeroed);
	edata_committed_set(edata, committed);
	edata_pai_set(edata, pai);
	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
	if (config_prof) {
		edata_prof_tctx_set(edata, NULL);
	}
}
620
+
621
/*
 * Initializer for base-allocated extents (uses e_bsize rather than the packed
 * size/esn).  The arena index is set to the all-1-bits "unassociated" value
 * documented on struct edata_s.
 */
static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
	edata_addr_set(edata, addr);
	edata_bsize_set(edata, bsize);
	edata_slab_set(edata, false);
	edata_szind_set(edata, SC_NSIZES);
	edata_sn_set(edata, sn);
	edata_state_set(edata, extent_state_active);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, true);
	edata_committed_set(edata, true);
	/*
	 * This isn't strictly true, but base allocated extents never get
	 * deallocated and can't be looked up in the emap, but no sense in
	 * wasting a state bit to encode this fact.
	 */
	edata_pai_set(edata, EXTENT_PAI_PAC);
}
640
+
641
+ static inline int
642
+ edata_esn_comp(const edata_t *a, const edata_t *b) {
643
+ size_t a_esn = edata_esn_get(a);
644
+ size_t b_esn = edata_esn_get(b);
645
+
646
+ return (a_esn > b_esn) - (a_esn < b_esn);
647
+ }
648
+
649
+ static inline int
650
+ edata_ead_comp(const edata_t *a, const edata_t *b) {
651
+ uintptr_t a_eaddr = (uintptr_t)a;
652
+ uintptr_t b_eaddr = (uintptr_t)b;
653
+
654
+ return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
655
+ }
656
+
657
+ static inline edata_cmp_summary_t
658
+ edata_cmp_summary_get(const edata_t *edata) {
659
+ return (edata_cmp_summary_t){edata_sn_get(edata),
660
+ (uintptr_t)edata_addr_get(edata)};
661
+ }
662
+
663
+ static inline int
664
+ edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
665
+ int ret;
666
+ ret = (a.sn > b.sn) - (a.sn < b.sn);
667
+ if (ret != 0) {
668
+ return ret;
669
+ }
670
+ ret = (a.addr > b.addr) - (a.addr < b.addr);
671
+ return ret;
672
+ }
673
+
674
+ static inline int
675
+ edata_snad_comp(const edata_t *a, const edata_t *b) {
676
+ edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
677
+ edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
678
+
679
+ return edata_cmp_summary_comp(a_cmp, b_cmp);
680
+ }
681
+
682
+ static inline int
683
+ edata_esnead_comp(const edata_t *a, const edata_t *b) {
684
+ int ret;
685
+
686
+ ret = edata_esn_comp(a, b);
687
+ if (ret != 0) {
688
+ return ret;
689
+ }
690
+
691
+ ret = edata_ead_comp(a, b);
692
+ return ret;
693
+ }
694
+
695
+ ph_proto(, edata_avail, edata_t)
696
+ ph_proto(, edata_heap, edata_t)
697
+
698
+ #endif /* JEMALLOC_INTERNAL_EDATA_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/edata_cache.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
2
+ #define JEMALLOC_INTERNAL_EDATA_CACHE_H
3
+
4
+ #include "jemalloc/internal/base.h"
5
+
6
+ /* For tests only. */
7
+ #define EDATA_CACHE_FAST_FILL 4
8
+
9
/*
 * A cache of edata_t structures allocated via base_alloc_edata (as opposed to
 * the underlying extents they describe).  The contents of returned edata_t
 * objects are garbage and cannot be relied upon.
 */

typedef struct edata_cache_s edata_cache_t;
struct edata_cache_s {
	/* Pairing heap of cached edata_t structures. */
	edata_avail_t avail;
	/*
	 * Count of cached structures; atomic, presumably so it can be read
	 * without holding mtx — TODO(review): confirm against edata_cache.c.
	 */
	atomic_zu_t count;
	/* Protects avail. */
	malloc_mutex_t mtx;
	/* Base allocator backing new edata_t allocations. */
	base_t *base;
};
22
+
23
/* Returns true on failure, per the usual jemalloc bool-error convention. */
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
/* Get/return an edata_t; returned contents are garbage (see above). */
edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);

/* Fork handlers for the internal mutex. */
void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
31
+ /*
32
+ * An edata_cache_fast is like an edata_cache, but it relies on external
33
+ * synchronization and avoids first-fit strategies.
34
+ */
35
+
36
typedef struct edata_cache_fast_s edata_cache_fast_t;
struct edata_cache_fast_s {
	/* Locally cached edata_t structures (externally synchronized). */
	edata_list_inactive_t list;
	/* Shared cache used when the local list is exhausted or disabled. */
	edata_cache_t *fallback;
	/* When true, operations bypass the local list (see _disable()). */
	bool disabled;
};

void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
    edata_t *edata);
void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
48
+
49
+ #endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/ehooks.h ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EHOOKS_H
2
+ #define JEMALLOC_INTERNAL_EHOOKS_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/extent_mmap.h"
6
+
7
+ /*
8
+ * This module is the internal interface to the extent hooks (both
9
+ * user-specified and external). Eventually, this will give us the flexibility
10
+ * to use multiple different versions of user-visible extent-hook APIs under a
11
+ * single user interface.
12
+ *
13
+ * Current API expansions (not available to anyone but the default hooks yet):
14
+ * - Head state tracking. Hooks can decide whether or not to merge two
15
+ * extents based on whether or not one of them is the head (i.e. was
16
+ * allocated on its own). The later extent loses its "head" status.
17
+ */
18
+
19
+ extern const extent_hooks_t ehooks_default_extent_hooks;
20
+
21
typedef struct ehooks_s ehooks_t;
struct ehooks_s {
	/*
	 * The user-visible id that goes with the ehooks (i.e. that of the base
	 * they're a part of, the associated arena's index within the arenas
	 * array).
	 */
	unsigned ind;
	/*
	 * Logically an extent_hooks_t *.  Stored atomically because the hooks
	 * pointer is swapped/read via atomic ops (see the set/get accessors).
	 */
	atomic_p_t ptr;
};
+ };
32
+
33
+ extern const extent_hooks_t ehooks_default_extent_hooks;
34
+
35
+ /*
36
+ * These are not really part of the public API. Each hook has a fast-path for
37
+ * the default-hooks case that can avoid various small inefficiencies:
38
+ * - Forgetting tsd and then calling tsd_get within the hook.
39
+ * - Getting more state than necessary out of the extent_t.
40
+ * - Doing arena_ind -> arena -> arena_ind lookups.
41
+ * By making the calls to these functions visible to the compiler, it can move
42
+ * those extra bits of computation down below the fast-paths where they get ignored.
43
+ */
44
+ void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
45
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
46
+ bool ehooks_default_dalloc_impl(void *addr, size_t size);
47
+ void ehooks_default_destroy_impl(void *addr, size_t size);
48
+ bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
49
+ bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
50
+ #ifdef PAGES_CAN_PURGE_LAZY
51
+ bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
52
+ #endif
53
+ #ifdef PAGES_CAN_PURGE_FORCED
54
+ bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
55
+ #endif
56
+ bool ehooks_default_split_impl();
57
+ /*
58
+ * Merge is the only default extent hook we declare -- see the comment in
59
+ * ehooks_merge.
60
+ */
61
+ bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
62
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
63
+ unsigned arena_ind);
64
+ bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
65
+ void ehooks_default_zero_impl(void *addr, size_t size);
66
+ void ehooks_default_guard_impl(void *guard1, void *guard2);
67
+ void ehooks_default_unguard_impl(void *guard1, void *guard2);
68
+
69
+ /*
70
+ * We don't officially support reentrancy from within the extent hooks. But
71
+ * various people who sit within throwing distance of the jemalloc team want
72
+ * that functionality in certain limited cases. The default reentrancy guards
73
+ * assert that we're not reentrant from a0 (since it's the bootstrap arena,
74
+ * where reentrant allocations would be redirected), which we would incorrectly
75
+ * trigger in cases where a0 has extent hooks (those hooks themselves can't be
76
+ * reentrant, then, but there are reasonable uses for such functionality, like
77
+ * putting internal metadata on hugepages). Therefore, we use the raw
78
+ * reentrancy guards.
79
+ *
80
+ * Eventually, we need to think more carefully about whether and where we
81
+ * support allocating from within extent hooks (and what that means for things
82
+ * like profiling, stats collection, etc.), and document what the guarantee is.
83
+ */
84
+ static inline void
85
+ ehooks_pre_reentrancy(tsdn_t *tsdn) {
86
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
87
+ tsd_pre_reentrancy_raw(tsd);
88
+ }
89
+
90
+ static inline void
91
+ ehooks_post_reentrancy(tsdn_t *tsdn) {
92
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
93
+ tsd_post_reentrancy_raw(tsd);
94
+ }
95
+
96
/* Beginning of the public API. */
void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);

/* The user-visible (arena/base) index these hooks belong to. */
static inline unsigned
ehooks_ind_get(const ehooks_t *ehooks) {
	return ehooks->ind;
}
103
+
104
/* Release-store pairs with the acquire-load in the getter below. */
static inline void
ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
	atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
}

static inline extent_hooks_t *
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
	return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
}
113
+
114
+ static inline bool
115
+ ehooks_are_default(ehooks_t *ehooks) {
116
+ return ehooks_get_extent_hooks_ptr(ehooks) ==
117
+ &ehooks_default_extent_hooks;
118
+ }
119
+
120
+ /*
121
+ * In some cases, a caller needs to allocate resources before attempting to call
122
+ * a hook. If that hook is doomed to fail, this is wasteful. We therefore
123
+ * include some checks for such cases.
124
+ */
125
+ static inline bool
126
+ ehooks_dalloc_will_fail(ehooks_t *ehooks) {
127
+ if (ehooks_are_default(ehooks)) {
128
+ return opt_retain;
129
+ } else {
130
+ return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
131
+ }
132
+ }
133
+
134
/* Split/merge can only fail up-front if the corresponding hook is absent. */
static inline bool
ehooks_split_will_fail(ehooks_t *ehooks) {
	return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
}

static inline bool
ehooks_merge_will_fail(ehooks_t *ehooks) {
	return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
}
143
+
144
static inline bool
ehooks_guard_will_fail(ehooks_t *ehooks) {
	/*
	 * Before the guard hooks are officially introduced, limit the use to
	 * the default hooks only.
	 */
	return !ehooks_are_default(ehooks);
}
152
+
153
/*
 * Some hooks are required to return zeroed memory in certain situations.  In
 * debug mode, we do some heuristic checks that they did what they were
 * supposed to.
 *
 * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
 * But incorrect zero information indicates an ehook bug.
 */
static inline void
ehooks_debug_zero_check(void *addr, size_t size) {
	/* The range must be page-aligned and non-empty. */
	assert(((uintptr_t)addr & PAGE_MASK) == 0);
	assert((size & PAGE_MASK) == 0);
	assert(size > 0);
	if (config_debug) {
		/* Check the whole first page. */
		size_t *p = (size_t *)addr;
		for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
			assert(p[i] == 0);
		}
		/*
		 * And 4 spots within.  There's a tradeoff here; the larger
		 * this number, the more likely it is that we'll catch a bug
		 * where ehooks return a sparsely non-zero range.  But
		 * increasing the number of checks also increases the number of
		 * page faults in debug mode.  FreeBSD does much of their
		 * day-to-day development work in debug mode, so we don't want
		 * even the debug builds to be too slow.
		 */
		const size_t nchecks = 4;
		assert(PAGE >= sizeof(size_t) * nchecks);
		for (size_t i = 0; i < nchecks; ++i) {
			assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
		}
	}
}
188
+
189
+
190
+ static inline void *
191
+ ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
192
+ size_t alignment, bool *zero, bool *commit) {
193
+ bool orig_zero = *zero;
194
+ void *ret;
195
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
196
+ if (extent_hooks == &ehooks_default_extent_hooks) {
197
+ ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
198
+ alignment, zero, commit, ehooks_ind_get(ehooks));
199
+ } else {
200
+ ehooks_pre_reentrancy(tsdn);
201
+ ret = extent_hooks->alloc(extent_hooks, new_addr, size,
202
+ alignment, zero, commit, ehooks_ind_get(ehooks));
203
+ ehooks_post_reentrancy(tsdn);
204
+ }
205
+ assert(new_addr == NULL || ret == NULL || new_addr == ret);
206
+ assert(!orig_zero || *zero);
207
+ if (*zero && ret != NULL) {
208
+ ehooks_debug_zero_check(ret, size);
209
+ }
210
+ return ret;
211
+ }
212
+
213
+ static inline bool
214
+ ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
215
+ bool committed) {
216
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
217
+ if (extent_hooks == &ehooks_default_extent_hooks) {
218
+ return ehooks_default_dalloc_impl(addr, size);
219
+ } else if (extent_hooks->dalloc == NULL) {
220
+ return true;
221
+ } else {
222
+ ehooks_pre_reentrancy(tsdn);
223
+ bool err = extent_hooks->dalloc(extent_hooks, addr, size,
224
+ committed, ehooks_ind_get(ehooks));
225
+ ehooks_post_reentrancy(tsdn);
226
+ return err;
227
+ }
228
+ }
229
+
230
+ static inline void
231
+ ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
232
+ bool committed) {
233
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
234
+ if (extent_hooks == &ehooks_default_extent_hooks) {
235
+ ehooks_default_destroy_impl(addr, size);
236
+ } else if (extent_hooks->destroy == NULL) {
237
+ /* Do nothing. */
238
+ } else {
239
+ ehooks_pre_reentrancy(tsdn);
240
+ extent_hooks->destroy(extent_hooks, addr, size, committed,
241
+ ehooks_ind_get(ehooks));
242
+ ehooks_post_reentrancy(tsdn);
243
+ }
244
+ }
245
+
246
+ static inline bool
247
+ ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
248
+ size_t offset, size_t length) {
249
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
250
+ bool err;
251
+ if (extent_hooks == &ehooks_default_extent_hooks) {
252
+ err = ehooks_default_commit_impl(addr, offset, length);
253
+ } else if (extent_hooks->commit == NULL) {
254
+ err = true;
255
+ } else {
256
+ ehooks_pre_reentrancy(tsdn);
257
+ err = extent_hooks->commit(extent_hooks, addr, size,
258
+ offset, length, ehooks_ind_get(ehooks));
259
+ ehooks_post_reentrancy(tsdn);
260
+ }
261
+ if (!err) {
262
+ ehooks_debug_zero_check(addr, size);
263
+ }
264
+ return err;
265
+ }
266
+
267
+ static inline bool
268
+ ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
269
+ size_t offset, size_t length) {
270
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
271
+ if (extent_hooks == &ehooks_default_extent_hooks) {
272
+ return ehooks_default_decommit_impl(addr, offset, length);
273
+ } else if (extent_hooks->decommit == NULL) {
274
+ return true;
275
+ } else {
276
+ ehooks_pre_reentrancy(tsdn);
277
+ bool err = extent_hooks->decommit(extent_hooks, addr, size,
278
+ offset, length, ehooks_ind_get(ehooks));
279
+ ehooks_post_reentrancy(tsdn);
280
+ return err;
281
+ }
282
+ }
283
+
284
+ static inline bool
285
+ ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
286
+ size_t offset, size_t length) {
287
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
288
+ #ifdef PAGES_CAN_PURGE_LAZY
289
+ if (extent_hooks == &ehooks_default_extent_hooks) {
290
+ return ehooks_default_purge_lazy_impl(addr, offset, length);
291
+ }
292
+ #endif
293
+ if (extent_hooks->purge_lazy == NULL) {
294
+ return true;
295
+ } else {
296
+ ehooks_pre_reentrancy(tsdn);
297
+ bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
298
+ offset, length, ehooks_ind_get(ehooks));
299
+ ehooks_post_reentrancy(tsdn);
300
+ return err;
301
+ }
302
+ }
303
+
304
+ static inline bool
305
+ ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
306
+ size_t offset, size_t length) {
307
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
308
+ /*
309
+ * It would be correct to have a ehooks_debug_zero_check call at the end
310
+ * of this function; purge_forced is required to zero. But checking
311
+ * would touch the page in question, which may have performance
312
+ * consequences (imagine the hooks are using hugepages, with a global
313
+ * zero page off). Even in debug mode, it's usually a good idea to
314
+ * avoid cases that can dramatically increase memory consumption.
315
+ */
316
+ #ifdef PAGES_CAN_PURGE_FORCED
317
+ if (extent_hooks == &ehooks_default_extent_hooks) {
318
+ return ehooks_default_purge_forced_impl(addr, offset, length);
319
+ }
320
+ #endif
321
+ if (extent_hooks->purge_forced == NULL) {
322
+ return true;
323
+ } else {
324
+ ehooks_pre_reentrancy(tsdn);
325
+ bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
326
+ offset, length, ehooks_ind_get(ehooks));
327
+ ehooks_post_reentrancy(tsdn);
328
+ return err;
329
+ }
330
+ }
331
+
332
+ static inline bool
333
+ ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
334
+ size_t size_a, size_t size_b, bool committed) {
335
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
336
+ if (ehooks_are_default(ehooks)) {
337
+ return ehooks_default_split_impl();
338
+ } else if (extent_hooks->split == NULL) {
339
+ return true;
340
+ } else {
341
+ ehooks_pre_reentrancy(tsdn);
342
+ bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
343
+ size_b, committed, ehooks_ind_get(ehooks));
344
+ ehooks_post_reentrancy(tsdn);
345
+ return err;
346
+ }
347
+ }
348
+
349
+ static inline bool
350
+ ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
351
+ void *addr_b, size_t size_b, bool committed) {
352
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
353
+ if (extent_hooks == &ehooks_default_extent_hooks) {
354
+ return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
355
+ } else if (extent_hooks->merge == NULL) {
356
+ return true;
357
+ } else {
358
+ ehooks_pre_reentrancy(tsdn);
359
+ bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
360
+ addr_b, size_b, committed, ehooks_ind_get(ehooks));
361
+ ehooks_post_reentrancy(tsdn);
362
+ return err;
363
+ }
364
+ }
365
+
366
+ static inline void
367
+ ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
368
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
369
+ if (extent_hooks == &ehooks_default_extent_hooks) {
370
+ ehooks_default_zero_impl(addr, size);
371
+ } else {
372
+ /*
373
+ * It would be correct to try using the user-provided purge
374
+ * hooks (since they are required to have zeroed the extent if
375
+ * they indicate success), but we don't necessarily know their
376
+ * cost. We'll be conservative and use memset.
377
+ */
378
+ memset(addr, 0, size);
379
+ }
380
+ }
381
+
382
+ static inline bool
383
+ ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
384
+ bool err;
385
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
386
+
387
+ if (extent_hooks == &ehooks_default_extent_hooks) {
388
+ ehooks_default_guard_impl(guard1, guard2);
389
+ err = false;
390
+ } else {
391
+ err = true;
392
+ }
393
+
394
+ return err;
395
+ }
396
+
397
+ static inline bool
398
+ ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
399
+ bool err;
400
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
401
+
402
+ if (extent_hooks == &ehooks_default_extent_hooks) {
403
+ ehooks_default_unguard_impl(guard1, guard2);
404
+ err = false;
405
+ } else {
406
+ err = true;
407
+ }
408
+
409
+ return err;
410
+ }
411
+
412
+ #endif /* JEMALLOC_INTERNAL_EHOOKS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/emap.h ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EMAP_H
2
+ #define JEMALLOC_INTERNAL_EMAP_H
3
+
4
+ #include "jemalloc/internal/base.h"
5
+ #include "jemalloc/internal/rtree.h"
6
+
7
+ /*
8
+  * Note: Ends without a semicolon, so that
9
+ * EMAP_DECLARE_RTREE_CTX;
10
+ * in uses will avoid empty-statement warnings.
11
+ */
12
+ #define EMAP_DECLARE_RTREE_CTX \
13
+ rtree_ctx_t rtree_ctx_fallback; \
14
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
15
+
16
+ typedef struct emap_s emap_t;
17
+ struct emap_s {
18
+ rtree_t rtree;
19
+ };
20
+
21
+ /* Used to pass rtree lookup context down the path. */
22
+ typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
23
+ struct emap_alloc_ctx_t {
24
+ szind_t szind;
25
+ bool slab;
26
+ };
27
+
28
+ typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
29
+ struct emap_full_alloc_ctx_s {
30
+ szind_t szind;
31
+ bool slab;
32
+ edata_t *edata;
33
+ };
34
+
35
+ bool emap_init(emap_t *emap, base_t *base, bool zeroed);
36
+
37
+ void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
38
+ bool slab);
39
+
40
+ void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
41
+ extent_state_t state);
42
+
43
+ /*
44
+ * The two acquire functions below allow accessing neighbor edatas, if it's safe
45
+ * and valid to do so (i.e. from the same arena, of the same state, etc.). This
46
+ * is necessary because the ecache locks are state based, and only protect
47
+ * edatas with the same state. Therefore the neighbor edata's state needs to be
48
+ * verified first, before chasing the edata pointer. The returned edata will be
49
+ * in an acquired state, meaning other threads will be prevented from accessing
50
+ * it, even if technically the edata can still be discovered from the rtree.
51
+ *
52
+ * This means, at any moment when holding pointers to edata, either one of the
53
+ * state based locks is held (and the edatas are all of the protected state), or
54
+ * the edatas are in an acquired state (e.g. in active or merging state). The
55
+ * acquire operation itself (changing the edata to an acquired state) is done
56
+ * under the state locks.
57
+ */
58
+ edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
59
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
60
+ bool forward);
61
+ edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
62
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
63
+ void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
64
+ extent_state_t new_state);
65
+
66
+ /*
67
+ * Associate the given edata with its beginning and end address, setting the
68
+ * szind and slab info appropriately.
69
+ * Returns true on error (i.e. resource exhaustion).
70
+ */
71
+ bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
72
+ szind_t szind, bool slab);
73
+
74
+ /*
75
+ * Does the same thing, but with the interior of the range, for slab
76
+ * allocations.
77
+ *
78
+ * You might wonder why we don't just have a single emap_register function that
79
+ * does both depending on the value of 'slab'. The answer is twofold:
80
+ * - As a practical matter, in places like the extract->split->commit pathway,
81
+ * we defer the interior operation until we're sure that the commit won't fail
82
+ * (but we have to register the split boundaries there).
83
+ * - In general, we're trying to move to a world where the page-specific
84
+ * allocator doesn't know as much about how the pages it allocates will be
85
+ * used, and passing a 'slab' parameter everywhere makes that more
86
+ * complicated.
87
+ *
88
+ * Unlike the boundary version, this function can't fail; this is because slabs
89
+ * can't get big enough to touch a new page that neither of the boundaries
90
+ * touched, so no allocation is necessary to fill the interior once the boundary
91
+ * has been touched.
92
+ */
93
+ void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
94
+ szind_t szind);
95
+
96
+ void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
97
+ void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
98
+
99
+ typedef struct emap_prepare_s emap_prepare_t;
100
+ struct emap_prepare_s {
101
+ rtree_leaf_elm_t *lead_elm_a;
102
+ rtree_leaf_elm_t *lead_elm_b;
103
+ rtree_leaf_elm_t *trail_elm_a;
104
+ rtree_leaf_elm_t *trail_elm_b;
105
+ };
106
+
107
+ /**
108
+  * These functions do the emap metadata management for merging, splitting, and
109
+ * reusing extents. In particular, they set the boundary mappings from
110
+ * addresses to edatas. If the result is going to be used as a slab, you
111
+ * still need to call emap_register_interior on it, though.
112
+ *
113
+ * Remap simply changes the szind and slab status of an extent's boundary
114
+ * mappings. If the extent is not a slab, it doesn't bother with updating the
115
+ * end mapping (since lookups only occur in the interior of an extent for
116
+ * slabs). Since the szind and slab status only make sense for active extents,
117
+ * this should only be called while activating or deactivating an extent.
118
+ *
119
+ * Split and merge have a "prepare" and a "commit" portion. The prepare portion
120
+ * does the operations that can be done without exclusive access to the extent
121
+ * in question, while the commit variant requires exclusive access to maintain
122
+ * the emap invariants. The only function that can fail is emap_split_prepare,
123
+ * and it returns true on failure (at which point the caller shouldn't commit).
124
+ *
125
+ * In all cases, "lead" refers to the lower-addressed extent, and trail to the
126
+ * higher-addressed one. It's the caller's responsibility to set the edata
127
+ * state appropriately.
128
+ */
129
+ bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
130
+ edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
131
+ void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
132
+ edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
133
+ void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
134
+ edata_t *lead, edata_t *trail);
135
+ void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
136
+ edata_t *lead, edata_t *trail);
137
+
138
+ /* Assert that the emap's view of the given edata matches the edata's view. */
139
+ void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
140
+ static inline void
141
+ emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
142
+ if (config_debug) {
143
+ emap_do_assert_mapped(tsdn, emap, edata);
144
+ }
145
+ }
146
+
147
+ /* Assert that the given edata isn't in the map. */
148
+ void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
149
+ static inline void
150
+ emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
151
+ if (config_debug) {
152
+ emap_do_assert_not_mapped(tsdn, emap, edata);
153
+ }
154
+ }
155
+
156
+ JEMALLOC_ALWAYS_INLINE bool
157
+ emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
158
+ assert(config_debug);
159
+ emap_assert_mapped(tsdn, emap, edata);
160
+
161
+ EMAP_DECLARE_RTREE_CTX;
162
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
163
+ (uintptr_t)edata_base_get(edata));
164
+
165
+ return edata_state_in_transition(contents.metadata.state);
166
+ }
167
+
168
+ JEMALLOC_ALWAYS_INLINE bool
169
+ emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
170
+ if (!config_debug) {
171
+ /* For assertions only. */
172
+ return false;
173
+ }
174
+
175
+ /*
176
+ * The edata is considered acquired if no other threads will attempt to
177
+ * read / write any fields from it. This includes a few cases:
178
+ *
179
+ * 1) edata not hooked into emap yet -- This implies the edata just got
180
+ * allocated or initialized.
181
+ *
182
+ * 2) in an active or transition state -- In both cases, the edata can
183
+ * be discovered from the emap, however the state tracked in the rtree
184
+ * will prevent other threads from accessing the actual edata.
185
+ */
186
+ EMAP_DECLARE_RTREE_CTX;
187
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
188
+ rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
189
+ /* init_missing */ false);
190
+ if (elm == NULL) {
191
+ return true;
192
+ }
193
+ rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
194
+ /* dependent */ true);
195
+ if (contents.edata == NULL ||
196
+ contents.metadata.state == extent_state_active ||
197
+ edata_state_in_transition(contents.metadata.state)) {
198
+ return true;
199
+ }
200
+
201
+ return false;
202
+ }
203
+
204
+ JEMALLOC_ALWAYS_INLINE void
205
+ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
206
+ assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
207
+ assert(edata_pai_get(inner) == edata_pai_get(outer));
208
+ assert(edata_committed_get(inner) == edata_committed_get(outer));
209
+ assert(edata_state_get(inner) == extent_state_active);
210
+ assert(edata_state_get(outer) == extent_state_merging);
211
+ assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
212
+ assert(edata_base_get(inner) == edata_past_get(outer) ||
213
+ edata_base_get(outer) == edata_past_get(inner));
214
+ }
215
+
216
+ JEMALLOC_ALWAYS_INLINE void
217
+ extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
218
+ assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
219
+ assert(edata_pai_get(original) == edata_pai_get(expand));
220
+ assert(edata_state_get(original) == extent_state_active);
221
+ assert(edata_state_get(expand) == extent_state_merging);
222
+ assert(edata_past_get(original) == edata_base_get(expand));
223
+ }
224
+
225
+ JEMALLOC_ALWAYS_INLINE edata_t *
226
+ emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
227
+ EMAP_DECLARE_RTREE_CTX;
228
+
229
+ return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
230
+ }
231
+
232
+ /* Fills in alloc_ctx with the info in the map. */
233
+ JEMALLOC_ALWAYS_INLINE void
234
+ emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
235
+ emap_alloc_ctx_t *alloc_ctx) {
236
+ EMAP_DECLARE_RTREE_CTX;
237
+
238
+ rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
239
+ rtree_ctx, (uintptr_t)ptr);
240
+ alloc_ctx->szind = metadata.szind;
241
+ alloc_ctx->slab = metadata.slab;
242
+ }
243
+
244
+ /* The pointer must be mapped. */
245
+ JEMALLOC_ALWAYS_INLINE void
246
+ emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
247
+ emap_full_alloc_ctx_t *full_alloc_ctx) {
248
+ EMAP_DECLARE_RTREE_CTX;
249
+
250
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
251
+ (uintptr_t)ptr);
252
+ full_alloc_ctx->edata = contents.edata;
253
+ full_alloc_ctx->szind = contents.metadata.szind;
254
+ full_alloc_ctx->slab = contents.metadata.slab;
255
+ }
256
+
257
+ /*
258
+ * The pointer is allowed to not be mapped.
259
+ *
260
+ * Returns true when the pointer is not present.
261
+ */
262
+ JEMALLOC_ALWAYS_INLINE bool
263
+ emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
264
+ emap_full_alloc_ctx_t *full_alloc_ctx) {
265
+ EMAP_DECLARE_RTREE_CTX;
266
+
267
+ rtree_contents_t contents;
268
+ bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
269
+ (uintptr_t)ptr, &contents);
270
+ if (err) {
271
+ return true;
272
+ }
273
+ full_alloc_ctx->edata = contents.edata;
274
+ full_alloc_ctx->szind = contents.metadata.szind;
275
+ full_alloc_ctx->slab = contents.metadata.slab;
276
+ return false;
277
+ }
278
+
279
+ /*
280
+ * Only used on the fastpath of free. Returns true when cannot be fulfilled by
281
+ * fast path, e.g. when the metadata key is not cached.
282
+ */
283
+ JEMALLOC_ALWAYS_INLINE bool
284
+ emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
285
+ emap_alloc_ctx_t *alloc_ctx) {
286
+ /* Use the unsafe getter since this may gets called during exit. */
287
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
288
+
289
+ rtree_metadata_t metadata;
290
+ bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
291
+ rtree_ctx, (uintptr_t)ptr, &metadata);
292
+ if (err) {
293
+ return true;
294
+ }
295
+ alloc_ctx->szind = metadata.szind;
296
+ alloc_ctx->slab = metadata.slab;
297
+ return false;
298
+ }
299
+
300
+ /*
301
+ * We want to do batch lookups out of the cache bins, which use
302
+ * cache_bin_ptr_array_get to access the i'th element of the bin (since they
303
+ * invert usual ordering in deciding what to flush). This lets the emap avoid
304
+ * caring about its caller's ordering.
305
+ */
306
+ typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
307
+ /*
308
+ * This allows size-checking assertions, which we can only do while we're in the
309
+ * process of edata lookups.
310
+ */
311
+ typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
312
+
313
+ typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
314
+ union emap_batch_lookup_result_u {
315
+ edata_t *edata;
316
+ rtree_leaf_elm_t *rtree_leaf;
317
+ };
318
+
319
+ JEMALLOC_ALWAYS_INLINE void
320
+ emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
321
+ emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
322
+ emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
323
+ emap_batch_lookup_result_t *result) {
324
+ /* Avoids null-checking tsdn in the loop below. */
325
+ util_assume(tsd != NULL);
326
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
327
+
328
+ for (size_t i = 0; i < nptrs; i++) {
329
+ const void *ptr = ptr_getter(ptr_getter_ctx, i);
330
+ /*
331
+ * Reuse the edatas array as a temp buffer, lying a little about
332
+ * the types.
333
+ */
334
+ result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
335
+ &emap->rtree, rtree_ctx, (uintptr_t)ptr,
336
+ /* dependent */ true, /* init_missing */ false);
337
+ }
338
+
339
+ for (size_t i = 0; i < nptrs; i++) {
340
+ rtree_leaf_elm_t *elm = result[i].rtree_leaf;
341
+ rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
342
+ &emap->rtree, elm, /* dependent */ true);
343
+ result[i].edata = contents.edata;
344
+ emap_full_alloc_ctx_t alloc_ctx;
345
+ /*
346
+ * Not all these fields are read in practice by the metadata
347
+ * visitor. But the compiler can easily optimize away the ones
348
+ * that aren't, so no sense in being incomplete.
349
+ */
350
+ alloc_ctx.szind = contents.metadata.szind;
351
+ alloc_ctx.slab = contents.metadata.slab;
352
+ alloc_ctx.edata = contents.edata;
353
+ metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
354
+ }
355
+ }
356
+
357
+ #endif /* JEMALLOC_INTERNAL_EMAP_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/emitter.h ADDED
@@ -0,0 +1,510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EMITTER_H
2
+ #define JEMALLOC_INTERNAL_EMITTER_H
3
+
4
+ #include "jemalloc/internal/ql.h"
5
+
6
typedef enum emitter_output_e emitter_output_t;
/* Output formats supported by the emitter. */
enum emitter_output_e {
	emitter_output_json,
	emitter_output_json_compact,
	emitter_output_table
};
12
+
13
typedef enum emitter_justify_e emitter_justify_t;
/* Column justification for table output. */
enum emitter_justify_e {
	emitter_justify_left,
	emitter_justify_right,
	/* Not for users; just to pass to internal functions. */
	emitter_justify_none
};
20
+
21
typedef enum emitter_type_e emitter_type_t;
/* C types the emitter knows how to format. */
enum emitter_type_e {
	emitter_type_bool,
	emitter_type_int,
	emitter_type_int64,
	emitter_type_unsigned,
	emitter_type_uint32,
	emitter_type_uint64,
	emitter_type_size,
	emitter_type_ssize,
	emitter_type_string,
	/*
	 * A title is a column title in a table; it's just a string, but it's
	 * not quoted.
	 */
	emitter_type_title,
};
38
+
39
+ typedef struct emitter_col_s emitter_col_t;
40
+ struct emitter_col_s {
41
+ /* Filled in by the user. */
42
+ emitter_justify_t justify;
43
+ int width;
44
+ emitter_type_t type;
45
+ union {
46
+ bool bool_val;
47
+ int int_val;
48
+ unsigned unsigned_val;
49
+ uint32_t uint32_val;
50
+ uint32_t uint32_t_val;
51
+ uint64_t uint64_val;
52
+ uint64_t uint64_t_val;
53
+ size_t size_val;
54
+ ssize_t ssize_val;
55
+ const char *str_val;
56
+ };
57
+
58
+ /* Filled in by initialization. */
59
+ ql_elm(emitter_col_t) link;
60
+ };
61
+
62
+ typedef struct emitter_row_s emitter_row_t;
63
+ struct emitter_row_s {
64
+ ql_head(emitter_col_t) cols;
65
+ };
66
+
67
+ typedef struct emitter_s emitter_t;
68
+ struct emitter_s {
69
+ emitter_output_t output;
70
+ /* The output information. */
71
+ write_cb_t *write_cb;
72
+ void *cbopaque;
73
+ int nesting_depth;
74
+ /* True if we've already emitted a value at the given depth. */
75
+ bool item_at_depth;
76
+ /* True if we emitted a key and will emit corresponding value next. */
77
+ bool emitted_key;
78
+ };
79
+
80
+ static inline bool
81
+ emitter_outputs_json(emitter_t *emitter) {
82
+ return emitter->output == emitter_output_json ||
83
+ emitter->output == emitter_output_json_compact;
84
+ }
85
+
86
+ /* Internal convenience function. Write to the emitter the given string. */
87
+ JEMALLOC_FORMAT_PRINTF(2, 3)
88
+ static inline void
89
+ emitter_printf(emitter_t *emitter, const char *format, ...) {
90
+ va_list ap;
91
+
92
+ va_start(ap, format);
93
+ malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
94
+ va_end(ap);
95
+ }
96
+
97
+ static inline const char * JEMALLOC_FORMAT_ARG(3)
98
+ emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
99
+ emitter_justify_t justify, int width) {
100
+ size_t written;
101
+ fmt_specifier++;
102
+ if (justify == emitter_justify_none) {
103
+ written = malloc_snprintf(out_fmt, out_size,
104
+ "%%%s", fmt_specifier);
105
+ } else if (justify == emitter_justify_left) {
106
+ written = malloc_snprintf(out_fmt, out_size,
107
+ "%%-%d%s", width, fmt_specifier);
108
+ } else {
109
+ written = malloc_snprintf(out_fmt, out_size,
110
+ "%%%d%s", width, fmt_specifier);
111
+ }
112
+ /* Only happens in case of bad format string, which *we* choose. */
113
+ assert(written < out_size);
114
+ return out_fmt;
115
+ }
116
+
117
+ /*
118
+ * Internal. Emit the given value type in the relevant encoding (so that the
119
+ * bool true gets mapped to json "true", but the string "true" gets mapped to
120
+ * json "\"true\"", for instance.
121
+ *
122
+ * Width is ignored if justify is emitter_justify_none.
123
+ */
124
+ static inline void
125
+ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
126
+ emitter_type_t value_type, const void *value) {
127
+ size_t str_written;
128
+ #define BUF_SIZE 256
129
+ #define FMT_SIZE 10
130
+ /*
131
+ * We dynamically generate a format string to emit, to let us use the
132
+ * snprintf machinery. This is kinda hacky, but gets the job done
133
+ * quickly without having to think about the various snprintf edge
134
+ * cases.
135
+ */
136
+ char fmt[FMT_SIZE];
137
+ char buf[BUF_SIZE];
138
+
139
+ #define EMIT_SIMPLE(type, format) \
140
+ emitter_printf(emitter, \
141
+ emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
142
+ *(const type *)value);
143
+
144
+ switch (value_type) {
145
+ case emitter_type_bool:
146
+ emitter_printf(emitter,
147
+ emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
148
+ *(const bool *)value ? "true" : "false");
149
+ break;
150
+ case emitter_type_int:
151
+ EMIT_SIMPLE(int, "%d")
152
+ break;
153
+ case emitter_type_int64:
154
+ EMIT_SIMPLE(int64_t, "%" FMTd64)
155
+ break;
156
+ case emitter_type_unsigned:
157
+ EMIT_SIMPLE(unsigned, "%u")
158
+ break;
159
+ case emitter_type_ssize:
160
+ EMIT_SIMPLE(ssize_t, "%zd")
161
+ break;
162
+ case emitter_type_size:
163
+ EMIT_SIMPLE(size_t, "%zu")
164
+ break;
165
+ case emitter_type_string:
166
+ str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
167
+ *(const char *const *)value);
168
+ /*
169
+ * We control the strings we output; we shouldn't get anything
170
+ * anywhere near the fmt size.
171
+ */
172
+ assert(str_written < BUF_SIZE);
173
+ emitter_printf(emitter,
174
+ emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
175
+ break;
176
+ case emitter_type_uint32:
177
+ EMIT_SIMPLE(uint32_t, "%" FMTu32)
178
+ break;
179
+ case emitter_type_uint64:
180
+ EMIT_SIMPLE(uint64_t, "%" FMTu64)
181
+ break;
182
+ case emitter_type_title:
183
+ EMIT_SIMPLE(char *const, "%s");
184
+ break;
185
+ default:
186
+ unreachable();
187
+ }
188
+ #undef BUF_SIZE
189
+ #undef FMT_SIZE
190
+ }
191
+
192
+
193
+ /* Internal functions. In json mode, tracks nesting state. */
194
+ static inline void
195
+ emitter_nest_inc(emitter_t *emitter) {
196
+ emitter->nesting_depth++;
197
+ emitter->item_at_depth = false;
198
+ }
199
+
200
+ static inline void
201
+ emitter_nest_dec(emitter_t *emitter) {
202
+ emitter->nesting_depth--;
203
+ emitter->item_at_depth = true;
204
+ }
205
+
206
+ static inline void
207
+ emitter_indent(emitter_t *emitter) {
208
+ int amount = emitter->nesting_depth;
209
+ const char *indent_str;
210
+ assert(emitter->output != emitter_output_json_compact);
211
+ if (emitter->output == emitter_output_json) {
212
+ indent_str = "\t";
213
+ } else {
214
+ amount *= 2;
215
+ indent_str = " ";
216
+ }
217
+ for (int i = 0; i < amount; i++) {
218
+ emitter_printf(emitter, "%s", indent_str);
219
+ }
220
+ }
221
+
222
+ static inline void
223
+ emitter_json_key_prefix(emitter_t *emitter) {
224
+ assert(emitter_outputs_json(emitter));
225
+ if (emitter->emitted_key) {
226
+ emitter->emitted_key = false;
227
+ return;
228
+ }
229
+ if (emitter->item_at_depth) {
230
+ emitter_printf(emitter, ",");
231
+ }
232
+ if (emitter->output != emitter_output_json_compact) {
233
+ emitter_printf(emitter, "\n");
234
+ emitter_indent(emitter);
235
+ }
236
+ }
237
+
238
+ /******************************************************************************/
239
+ /* Public functions for emitter_t. */
240
+
241
+ static inline void
242
+ emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
243
+ write_cb_t *write_cb, void *cbopaque) {
244
+ emitter->output = emitter_output;
245
+ emitter->write_cb = write_cb;
246
+ emitter->cbopaque = cbopaque;
247
+ emitter->item_at_depth = false;
248
+ emitter->emitted_key = false;
249
+ emitter->nesting_depth = 0;
250
+ }
251
+
252
+ /******************************************************************************/
253
+ /* JSON public API. */
254
+
255
+ /*
256
+ * Emits a key (e.g. as appears in an object). The next json entity emitted will
257
+ * be the corresponding value.
258
+ */
259
+ static inline void
260
+ emitter_json_key(emitter_t *emitter, const char *json_key) {
261
+ if (emitter_outputs_json(emitter)) {
262
+ emitter_json_key_prefix(emitter);
263
+ emitter_printf(emitter, "\"%s\":%s", json_key,
264
+ emitter->output == emitter_output_json_compact ? "" : " ");
265
+ emitter->emitted_key = true;
266
+ }
267
+ }
268
+
269
+ static inline void
270
+ emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
271
+ const void *value) {
272
+ if (emitter_outputs_json(emitter)) {
273
+ emitter_json_key_prefix(emitter);
274
+ emitter_print_value(emitter, emitter_justify_none, -1,
275
+ value_type, value);
276
+ emitter->item_at_depth = true;
277
+ }
278
+ }
279
+
280
+ /* Shorthand for calling emitter_json_key and then emitter_json_value. */
281
+ static inline void
282
+ emitter_json_kv(emitter_t *emitter, const char *json_key,
283
+ emitter_type_t value_type, const void *value) {
284
+ emitter_json_key(emitter, json_key);
285
+ emitter_json_value(emitter, value_type, value);
286
+ }
287
+
288
+ static inline void
289
+ emitter_json_array_begin(emitter_t *emitter) {
290
+ if (emitter_outputs_json(emitter)) {
291
+ emitter_json_key_prefix(emitter);
292
+ emitter_printf(emitter, "[");
293
+ emitter_nest_inc(emitter);
294
+ }
295
+ }
296
+
297
+ /* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
298
+ static inline void
299
+ emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
300
+ emitter_json_key(emitter, json_key);
301
+ emitter_json_array_begin(emitter);
302
+ }
303
+
304
+ static inline void
305
+ emitter_json_array_end(emitter_t *emitter) {
306
+ if (emitter_outputs_json(emitter)) {
307
+ assert(emitter->nesting_depth > 0);
308
+ emitter_nest_dec(emitter);
309
+ if (emitter->output != emitter_output_json_compact) {
310
+ emitter_printf(emitter, "\n");
311
+ emitter_indent(emitter);
312
+ }
313
+ emitter_printf(emitter, "]");
314
+ }
315
+ }
316
+
317
+ static inline void
318
+ emitter_json_object_begin(emitter_t *emitter) {
319
+ if (emitter_outputs_json(emitter)) {
320
+ emitter_json_key_prefix(emitter);
321
+ emitter_printf(emitter, "{");
322
+ emitter_nest_inc(emitter);
323
+ }
324
+ }
325
+
326
+ /* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
327
+ static inline void
328
+ emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
329
+ emitter_json_key(emitter, json_key);
330
+ emitter_json_object_begin(emitter);
331
+ }
332
+
333
+ static inline void
334
+ emitter_json_object_end(emitter_t *emitter) {
335
+ if (emitter_outputs_json(emitter)) {
336
+ assert(emitter->nesting_depth > 0);
337
+ emitter_nest_dec(emitter);
338
+ if (emitter->output != emitter_output_json_compact) {
339
+ emitter_printf(emitter, "\n");
340
+ emitter_indent(emitter);
341
+ }
342
+ emitter_printf(emitter, "}");
343
+ }
344
+ }
345
+
346
+
347
+ /******************************************************************************/
348
+ /* Table public API. */
349
+
350
+ static inline void
351
+ emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
352
+ if (emitter->output == emitter_output_table) {
353
+ emitter_indent(emitter);
354
+ emitter_printf(emitter, "%s\n", table_key);
355
+ emitter_nest_inc(emitter);
356
+ }
357
+ }
358
+
359
+ static inline void
360
+ emitter_table_dict_end(emitter_t *emitter) {
361
+ if (emitter->output == emitter_output_table) {
362
+ emitter_nest_dec(emitter);
363
+ }
364
+ }
365
+
366
+ static inline void
367
+ emitter_table_kv_note(emitter_t *emitter, const char *table_key,
368
+ emitter_type_t value_type, const void *value,
369
+ const char *table_note_key, emitter_type_t table_note_value_type,
370
+ const void *table_note_value) {
371
+ if (emitter->output == emitter_output_table) {
372
+ emitter_indent(emitter);
373
+ emitter_printf(emitter, "%s: ", table_key);
374
+ emitter_print_value(emitter, emitter_justify_none, -1,
375
+ value_type, value);
376
+ if (table_note_key != NULL) {
377
+ emitter_printf(emitter, " (%s: ", table_note_key);
378
+ emitter_print_value(emitter, emitter_justify_none, -1,
379
+ table_note_value_type, table_note_value);
380
+ emitter_printf(emitter, ")");
381
+ }
382
+ emitter_printf(emitter, "\n");
383
+ }
384
+ emitter->item_at_depth = true;
385
+ }
386
+
387
+ static inline void
388
+ emitter_table_kv(emitter_t *emitter, const char *table_key,
389
+ emitter_type_t value_type, const void *value) {
390
+ emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
391
+ emitter_type_bool, NULL);
392
+ }
393
+
394
+
395
+ /* Write to the emitter the given string, but only in table mode. */
396
+ JEMALLOC_FORMAT_PRINTF(2, 3)
397
+ static inline void
398
+ emitter_table_printf(emitter_t *emitter, const char *format, ...) {
399
+ if (emitter->output == emitter_output_table) {
400
+ va_list ap;
401
+ va_start(ap, format);
402
+ malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
403
+ va_end(ap);
404
+ }
405
+ }
406
+
407
+ static inline void
408
+ emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
409
+ if (emitter->output != emitter_output_table) {
410
+ return;
411
+ }
412
+ emitter_col_t *col;
413
+ ql_foreach(col, &row->cols, link) {
414
+ emitter_print_value(emitter, col->justify, col->width,
415
+ col->type, (const void *)&col->bool_val);
416
+ }
417
+ emitter_table_printf(emitter, "\n");
418
+ }
419
+
420
+ static inline void
421
+ emitter_row_init(emitter_row_t *row) {
422
+ ql_new(&row->cols);
423
+ }
424
+
425
+ static inline void
426
+ emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
427
+ ql_elm_new(col, link);
428
+ ql_tail_insert(&row->cols, col, link);
429
+ }
430
+
431
+
432
+ /******************************************************************************/
433
+ /*
434
+ * Generalized public API. Emits using either JSON or table, according to
435
+ * settings in the emitter_t. */
436
+
437
+ /*
438
+ * Note emits a different kv pair as well, but only in table mode. Omits the
439
+ * note if table_note_key is NULL.
440
+ */
441
+ static inline void
442
+ emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
443
+ emitter_type_t value_type, const void *value,
444
+ const char *table_note_key, emitter_type_t table_note_value_type,
445
+ const void *table_note_value) {
446
+ if (emitter_outputs_json(emitter)) {
447
+ emitter_json_key(emitter, json_key);
448
+ emitter_json_value(emitter, value_type, value);
449
+ } else {
450
+ emitter_table_kv_note(emitter, table_key, value_type, value,
451
+ table_note_key, table_note_value_type, table_note_value);
452
+ }
453
+ emitter->item_at_depth = true;
454
+ }
455
+
456
+ static inline void
457
+ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
458
+ emitter_type_t value_type, const void *value) {
459
+ emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
460
+ emitter_type_bool, NULL);
461
+ }
462
+
463
+ static inline void
464
+ emitter_dict_begin(emitter_t *emitter, const char *json_key,
465
+ const char *table_header) {
466
+ if (emitter_outputs_json(emitter)) {
467
+ emitter_json_key(emitter, json_key);
468
+ emitter_json_object_begin(emitter);
469
+ } else {
470
+ emitter_table_dict_begin(emitter, table_header);
471
+ }
472
+ }
473
+
474
+ static inline void
475
+ emitter_dict_end(emitter_t *emitter) {
476
+ if (emitter_outputs_json(emitter)) {
477
+ emitter_json_object_end(emitter);
478
+ } else {
479
+ emitter_table_dict_end(emitter);
480
+ }
481
+ }
482
+
483
+ static inline void
484
+ emitter_begin(emitter_t *emitter) {
485
+ if (emitter_outputs_json(emitter)) {
486
+ assert(emitter->nesting_depth == 0);
487
+ emitter_printf(emitter, "{");
488
+ emitter_nest_inc(emitter);
489
+ } else {
490
+ /*
491
+ * This guarantees that we always call write_cb at least once.
492
+ * This is useful if some invariant is established by each call
493
+ * to write_cb, but doesn't hold initially: e.g., some buffer
494
+ * holds a null-terminated string.
495
+ */
496
+ emitter_printf(emitter, "%s", "");
497
+ }
498
+ }
499
+
500
+ static inline void
501
+ emitter_end(emitter_t *emitter) {
502
+ if (emitter_outputs_json(emitter)) {
503
+ assert(emitter->nesting_depth == 1);
504
+ emitter_nest_dec(emitter);
505
+ emitter_printf(emitter, "%s", emitter->output ==
506
+ emitter_output_json_compact ? "}" : "\n}\n");
507
+ }
508
+ }
509
+
510
+ #endif /* JEMALLOC_INTERNAL_EMITTER_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/eset.h ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_ESET_H
2
+ #define JEMALLOC_INTERNAL_ESET_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/fb.h"
6
+ #include "jemalloc/internal/edata.h"
7
+ #include "jemalloc/internal/mutex.h"
8
+
9
+ /*
10
+ * An eset ("extent set") is a quantized collection of extents, with built-in
11
+ * LRU queue.
12
+ *
13
+ * This class is not thread-safe; synchronization must be done externally if
14
+ * there are mutating operations. One exception is the stats counters, which
15
+ * may be read without any locking.
16
+ */
17
+
18
+ typedef struct eset_bin_s eset_bin_t;
19
+ struct eset_bin_s {
20
+ edata_heap_t heap;
21
+ /*
22
+ * We do first-fit across multiple size classes. If we compared against
23
+ * the min element in each heap directly, we'd take a cache miss per
24
+ * extent we looked at. If we co-locate the edata summaries, we only
25
+ * take a miss on the edata we're actually going to return (which is
26
+ * inevitable anyways).
27
+ */
28
+ edata_cmp_summary_t heap_min;
29
+ };
30
+
31
+ typedef struct eset_bin_stats_s eset_bin_stats_t;
32
+ struct eset_bin_stats_s {
33
+ atomic_zu_t nextents;
34
+ atomic_zu_t nbytes;
35
+ };
36
+
37
+ typedef struct eset_s eset_t;
38
+ struct eset_s {
39
+ /* Bitmap for which set bits correspond to non-empty heaps. */
40
+ fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
41
+
42
+ /* Quantized per size class heaps of extents. */
43
+ eset_bin_t bins[SC_NPSIZES + 1];
44
+
45
+ eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
46
+
47
+ /* LRU of all extents in heaps. */
48
+ edata_list_inactive_t lru;
49
+
50
+ /* Page sum for all extents in heaps. */
51
+ atomic_zu_t npages;
52
+
53
+ /*
54
+ * A duplication of the data in the containing ecache. We use this only
55
+ * for assertions on the states of the passed-in extents.
56
+ */
57
+ extent_state_t state;
58
+ };
59
+
60
+ void eset_init(eset_t *eset, extent_state_t state);
61
+
62
+ size_t eset_npages_get(eset_t *eset);
63
+ /* Get the number of extents in the given page size index. */
64
+ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
65
+ /* Get the sum total bytes of the extents in the given page size index. */
66
+ size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
67
+
68
+ void eset_insert(eset_t *eset, edata_t *edata);
69
+ void eset_remove(eset_t *eset, edata_t *edata);
70
+ /*
71
+ * Select an extent from this eset of the given size and alignment. Returns
72
+ * null if no such item could be found.
73
+ */
74
+ edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
75
+ unsigned lg_max_fit);
76
+
77
+ #endif /* JEMALLOC_INTERNAL_ESET_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/exp_grow.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EXP_GROW_H
2
+ #define JEMALLOC_INTERNAL_EXP_GROW_H
3
+
4
+ typedef struct exp_grow_s exp_grow_t;
5
+ struct exp_grow_s {
6
+ /*
7
+ * Next extent size class in a growing series to use when satisfying a
8
+ * request via the extent hooks (only if opt_retain). This limits the
9
+ * number of disjoint virtual memory ranges so that extent merging can
10
+ * be effective even if multiple arenas' extent allocation requests are
11
+ * highly interleaved.
12
+ *
13
+ * retain_grow_limit is the max allowed size ind to expand (unless the
14
+ * required size is greater). Default is no limit, and controlled
15
+ * through mallctl only.
16
+ */
17
+ pszind_t next;
18
+ pszind_t limit;
19
+ };
20
+
21
/*
 * Compute the next expansion size. On success, *r_alloc_size receives the
 * smallest size in the growth series (starting at exp_grow->next) that is
 * >= alloc_size_min, and *r_skip the number of size classes stepped past to
 * reach it. Returns true on failure (the required index would leave the
 * legal page-size-class range), false on success. Note that the out
 * parameters are written incrementally, so their values on a true return
 * reflect the last attempted step.
 */
static inline bool
exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
    size_t *r_alloc_size, pszind_t *r_skip) {
	*r_skip = 0;
	*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
	while (*r_alloc_size < alloc_size_min) {
		(*r_skip)++;
		if (exp_grow->next + *r_skip >=
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
			/* Outside legal range. */
			return true;
		}
		*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
	}
	return false;
}

/*
 * Commit a successful exp_grow_size_prepare: advance the growth series just
 * past the size class used, clamping at the configured limit.
 */
static inline void
exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
	if (exp_grow->next + skip + 1 <= exp_grow->limit) {
		exp_grow->next += skip + 1;
	} else {
		exp_grow->next = exp_grow->limit;
	}

}
47
+
48
+ void exp_grow_init(exp_grow_t *exp_grow);
49
+
50
+ #endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EXTENT_H
2
+ #define JEMALLOC_INTERNAL_EXTENT_H
3
+
4
+ #include "jemalloc/internal/ecache.h"
5
+ #include "jemalloc/internal/ehooks.h"
6
+ #include "jemalloc/internal/ph.h"
7
+ #include "jemalloc/internal/rtree.h"
8
+
9
+ /*
10
+ * This module contains the page-level allocator. It chooses the addresses that
11
+ * allocations requested by other modules will inhabit, and updates the global
12
+ * metadata to reflect allocation/deallocation/purging decisions.
13
+ */
14
+
15
+ /*
16
+ * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit)
17
+ * is the max ratio between the size of the active extent and the new extent.
18
+ */
19
+ #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
20
+ extern size_t opt_lg_extent_max_active_fit;
21
+
22
+ edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
23
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
24
+ bool zero, bool guarded);
25
+ edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
26
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
27
+ bool zero, bool guarded);
28
+ void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
29
+ ecache_t *ecache, edata_t *edata);
30
+ edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
31
+ ecache_t *ecache, size_t npages_min);
32
+
33
+ void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
34
+ void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
35
+ edata_t *edata);
36
+ void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
37
+ edata_t *edata);
38
+ edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
39
+ void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
40
+ bool growing_retained);
41
+ void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42
+ edata_t *edata);
43
+ void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
44
+ edata_t *edata);
45
+ bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
46
+ size_t offset, size_t length);
47
+ bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
48
+ size_t offset, size_t length);
49
+ bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
50
+ size_t offset, size_t length);
51
+ bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
52
+ size_t offset, size_t length);
53
+ edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
54
+ ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
55
+ bool holding_core_locks);
56
+ bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
57
+ edata_t *a, edata_t *b);
58
+ bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
59
+ bool commit, bool zero, bool growing_retained);
60
+ size_t extent_sn_next(pac_t *pac);
61
+ bool extent_boot(void);
62
+
63
+ JEMALLOC_ALWAYS_INLINE bool
64
+ extent_neighbor_head_state_mergeable(bool edata_is_head,
65
+ bool neighbor_is_head, bool forward) {
66
+ /*
67
+ * Head states checking: disallow merging if the higher addr extent is a
68
+ * head extent. This helps preserve first-fit, and more importantly
69
+ * makes sure no merge across arenas.
70
+ */
71
+ if (forward) {
72
+ if (neighbor_is_head) {
73
+ return false;
74
+ }
75
+ } else {
76
+ if (edata_is_head) {
77
+ return false;
78
+ }
79
+ }
80
+ return true;
81
+ }
82
+
83
/*
 * Decide whether the neighboring extent described by the rtree `contents`
 * may be acquired for merging with (or, when `expanding`, growing into)
 * edata. Only the metadata snapshot in `contents` may be inspected until
 * the state checks below prove it is safe to dereference the neighbor.
 */
JEMALLOC_ALWAYS_INLINE bool
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
    extent_pai_t pai, extent_state_t expected_state, bool forward,
    bool expanding) {
	edata_t *neighbor = contents.edata;
	if (neighbor == NULL) {
		return false;
	}
	/* It's not safe to access *neighbor yet; must verify states first. */
	bool neighbor_is_head = contents.metadata.is_head;
	if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
	    neighbor_is_head, forward)) {
		return false;
	}
	extent_state_t neighbor_state = contents.metadata.state;
	if (pai == EXTENT_PAI_PAC) {
		if (neighbor_state != expected_state) {
			return false;
		}
		/* From this point, it's safe to access *neighbor. */
		if (!expanding && (edata_committed_get(edata) !=
		    edata_committed_get(neighbor))) {
			/*
			 * Some platforms (e.g. Windows) require an explicit
			 * commit step (and writing to uncommitted memory is not
			 * allowed).
			 */
			return false;
		}
	} else {
		/* Non-PAC: any non-active neighbor state is acceptable. */
		if (neighbor_state == extent_state_active) {
			return false;
		}
		/* From this point, it's safe to access *neighbor. */
	}

	assert(edata_pai_get(edata) == pai);
	if (edata_pai_get(neighbor) != pai) {
		return false;
	}
	if (opt_retain) {
		/* With retain enabled, merges never cross arenas. */
		assert(edata_arena_ind_get(edata) ==
		    edata_arena_ind_get(neighbor));
	} else {
		if (edata_arena_ind_get(edata) !=
		    edata_arena_ind_get(neighbor)) {
			return false;
		}
	}
	assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));

	return true;
}
136
+
137
+ #endif /* JEMALLOC_INTERNAL_EXTENT_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent_dss.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
2
+ #define JEMALLOC_INTERNAL_EXTENT_DSS_H
3
+
4
+ typedef enum {
5
+ dss_prec_disabled = 0,
6
+ dss_prec_primary = 1,
7
+ dss_prec_secondary = 2,
8
+
9
+ dss_prec_limit = 3
10
+ } dss_prec_t;
11
+ #define DSS_PREC_DEFAULT dss_prec_secondary
12
+ #define DSS_DEFAULT "secondary"
13
+
14
+ extern const char *dss_prec_names[];
15
+
16
+ extern const char *opt_dss;
17
+
18
+ dss_prec_t extent_dss_prec_get(void);
19
+ bool extent_dss_prec_set(dss_prec_t dss_prec);
20
+ void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
21
+ size_t size, size_t alignment, bool *zero, bool *commit);
22
+ bool extent_in_dss(void *addr);
23
+ bool extent_dss_mergeable(void *addr_a, void *addr_b);
24
+ void extent_dss_boot(void);
25
+
26
+ #endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/extent_mmap.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
2
+ #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
3
+
4
+ extern bool opt_retain;
5
+
6
+ void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
7
+ bool *zero, bool *commit);
8
+ bool extent_dalloc_mmap(void *addr, size_t size);
9
+
10
+ #endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/fb.h ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_FB_H
2
+ #define JEMALLOC_INTERNAL_FB_H
3
+
4
+ /*
5
+ * The flat bitmap module. This has a larger API relative to the bitmap module
6
+ * (supporting things like backwards searches, and searching for both set and
7
+ * unset bits), at the cost of slower operations for very large bitmaps.
8
+ *
9
+ * Initialized flat bitmaps start at all-zeros (all bits unset).
10
+ */
11
+
12
+ typedef unsigned long fb_group_t;
13
+ #define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
14
+ #define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
15
+ + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
16
+
17
+ static inline void
18
+ fb_init(fb_group_t *fb, size_t nbits) {
19
+ size_t ngroups = FB_NGROUPS(nbits);
20
+ memset(fb, 0, ngroups * sizeof(fb_group_t));
21
+ }
22
+
23
+ static inline bool
24
+ fb_empty(fb_group_t *fb, size_t nbits) {
25
+ size_t ngroups = FB_NGROUPS(nbits);
26
+ for (size_t i = 0; i < ngroups; i++) {
27
+ if (fb[i] != 0) {
28
+ return false;
29
+ }
30
+ }
31
+ return true;
32
+ }
33
+
34
+ static inline bool
35
+ fb_full(fb_group_t *fb, size_t nbits) {
36
+ size_t ngroups = FB_NGROUPS(nbits);
37
+ size_t trailing_bits = nbits % FB_GROUP_BITS;
38
+ size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
39
+ for (size_t i = 0; i < limit; i++) {
40
+ if (fb[i] != ~(fb_group_t)0) {
41
+ return false;
42
+ }
43
+ }
44
+ if (trailing_bits == 0) {
45
+ return true;
46
+ }
47
+ return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
48
+ }
49
+
50
+ static inline bool
51
+ fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
52
+ assert(bit < nbits);
53
+ size_t group_ind = bit / FB_GROUP_BITS;
54
+ size_t bit_ind = bit % FB_GROUP_BITS;
55
+ return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
56
+ }
57
+
58
+ static inline void
59
+ fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
60
+ assert(bit < nbits);
61
+ size_t group_ind = bit / FB_GROUP_BITS;
62
+ size_t bit_ind = bit % FB_GROUP_BITS;
63
+ fb[group_ind] |= ((fb_group_t)1 << bit_ind);
64
+ }
65
+
66
+ static inline void
67
+ fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
68
+ assert(bit < nbits);
69
+ size_t group_ind = bit / FB_GROUP_BITS;
70
+ size_t bit_ind = bit % FB_GROUP_BITS;
71
+ fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
72
+ }
73
+
74
+
75
/*
 * Some implementation details. This visitation function lets us apply a group
 * visitor to each group in the bitmap (potentially modifying it). The mask
 * indicates which bits are logically part of the visitation.
 */
typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
/*
 * Apply `visit` to every group overlapping the cnt-bit range starting at
 * `start`, with a mask selecting the bits of that group that belong to the
 * range. cnt must be nonzero and the range must fit within nbits.
 */
JEMALLOC_ALWAYS_INLINE void
fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
    size_t start, size_t cnt) {
	assert(cnt > 0);
	assert(start + cnt <= nbits);
	size_t group_ind = start / FB_GROUP_BITS;
	size_t start_bit_ind = start % FB_GROUP_BITS;
	/*
	 * The first group is special; it's the only one we don't start writing
	 * to from bit 0.
	 */
	size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
	    ? FB_GROUP_BITS - start_bit_ind : cnt);
	/*
	 * We can basically split affected words into:
	 * - The first group, where we touch only the high bits
	 * - The last group, where we touch only the low bits
	 * - The middle, where we set all the bits to the same thing.
	 * We treat each case individually. The last two could be merged, but
	 * this can lead to bad codegen for those middle words.
	 */
	/* First group: first_group_cnt ones, shifted up to start_bit_ind. */
	fb_group_t mask = ((~(fb_group_t)0)
	    >> (FB_GROUP_BITS - first_group_cnt))
	    << start_bit_ind;
	visit(ctx, &fb[group_ind], mask);

	cnt -= first_group_cnt;
	group_ind++;
	/* Middle groups: fully covered, so the mask is all ones. */
	while (cnt > FB_GROUP_BITS) {
		visit(ctx, &fb[group_ind], ~(fb_group_t)0);
		cnt -= FB_GROUP_BITS;
		group_ind++;
	}
	/* Last group: the low cnt bits (cnt may equal FB_GROUP_BITS here). */
	if (cnt != 0) {
		mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
		visit(ctx, &fb[group_ind], mask);
	}
}
122
+
123
+ JEMALLOC_ALWAYS_INLINE void
124
+ fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
125
+ bool val = *(bool *)ctx;
126
+ if (val) {
127
+ *fb |= mask;
128
+ } else {
129
+ *fb &= ~mask;
130
+ }
131
+ }
132
+
133
+ /* Sets the cnt bits starting at position start. Must not have a 0 count. */
134
+ static inline void
135
+ fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
136
+ bool val = true;
137
+ fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
138
+ }
139
+
140
+ /* Unsets the cnt bits starting at position start. Must not have a 0 count. */
141
+ static inline void
142
+ fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
143
+ bool val = false;
144
+ fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
145
+ }
146
+
147
+ JEMALLOC_ALWAYS_INLINE void
148
+ fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
149
+ size_t *scount = (size_t *)ctx;
150
+ *scount += popcount_lu(*fb & mask);
151
+ }
152
+
153
+ /* Finds the number of set bit in the of length cnt starting at start. */
154
+ JEMALLOC_ALWAYS_INLINE size_t
155
+ fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
156
+ size_t scount = 0;
157
+ fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
158
+ return scount;
159
+ }
160
+
161
+ /* Finds the number of unset bit in the of length cnt starting at start. */
162
+ JEMALLOC_ALWAYS_INLINE size_t
163
+ fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
164
+ size_t scount = fb_scount(fb, nbits, start, cnt);
165
+ return cnt - scount;
166
+ }
167
+
168
/*
 * An implementation detail; find the first (forward) or last (backward) bit
 * with the value val, at a position >= start (forward) or <= start
 * (backward).
 *
 * When searching forward, returns the number of bits in the bitmap if no
 * such bit exists; when searching backward, returns -1.
 */
JEMALLOC_ALWAYS_INLINE ssize_t
fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
    bool forward) {
	assert(start < nbits);
	size_t ngroups = FB_NGROUPS(nbits);
	ssize_t group_ind = start / FB_GROUP_BITS;
	size_t bit_ind = start % FB_GROUP_BITS;

	/* XOR-ing with all-ones turns a search for 0s into a search for 1s. */
	fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);

	fb_group_t group = fb[group_ind];
	group ^= maybe_invert;
	if (forward) {
		/* Only keep ones in bits bit_ind and above. */
		group &= ~((1LU << bit_ind) - 1);
	} else {
		/*
		 * Only keep ones in bits bit_ind and below. You might more
		 * naturally express this as (1 << (bit_ind + 1)) - 1, but
		 * that shifts by an invalid amount if bit_ind is one less than
		 * FB_GROUP_BITS.
		 */
		group &= ((2LU << bit_ind) - 1);
	}
	/* Scan group by group until one holds a candidate bit. */
	ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
	while (group == 0) {
		group_ind += forward ? 1 : -1;
		if (group_ind == group_ind_bound) {
			return forward ? (ssize_t)nbits : (ssize_t)-1;
		}
		group = fb[group_ind];
		group ^= maybe_invert;
	}
	assert(group != 0);
	size_t bit = forward ? ffs_lu(group) : fls_lu(group);
	size_t pos = group_ind * FB_GROUP_BITS + bit;
	/*
	 * The high bits of a partially filled last group are zeros, so if we're
	 * looking for zeros we don't want to report an invalid result.
	 */
	if (forward && !val && pos > nbits) {
		return nbits;
	}
	return pos;
}
219
+
220
+ /*
221
+ * Find the first set bit in the bitmap with an index >= min_bit. Returns the
222
+ * number of bits in the bitmap if no such bit exists.
223
+ */
224
+ static inline size_t
225
+ fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
226
+ return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
227
+ /* forward */ true);
228
+ }
229
+
230
+ /* The same, but looks for an unset bit. */
231
+ static inline size_t
232
+ fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
233
+ return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
234
+ /* forward */ true);
235
+ }
236
+
237
+ /*
238
+ * Find the last set bit in the bitmap with an index <= max_bit. Returns -1 if
239
+ * no such bit exists.
240
+ */
241
+ static inline ssize_t
242
+ fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
243
+ return fb_find_impl(fb, nbits, max_bit, /* val */ false,
244
+ /* forward */ false);
245
+ }
246
+
247
+ static inline ssize_t
248
+ fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
249
+ return fb_find_impl(fb, nbits, max_bit, /* val */ true,
250
+ /* forward */ false);
251
+ }
252
+
253
/* Returns whether or not we found a range (of bits equal to val). */
JEMALLOC_ALWAYS_INLINE bool
fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len, bool val, bool forward) {
	assert(start < nbits);
	/* First bit of interest at/after (or at/before, backward) start. */
	ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
	if ((forward && next_range_begin == (ssize_t)nbits)
	    || (!forward && next_range_begin == (ssize_t)-1)) {
		return false;
	}
	/* Half open range; the set bits are [begin, end). */
	ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
	    forward);
	if (forward) {
		*r_begin = next_range_begin;
		*r_len = next_range_end - next_range_begin;
	} else {
		/* Backward: the run spans (end, begin], reported low-to-high. */
		*r_begin = next_range_end + 1;
		*r_len = next_range_begin - next_range_end;
	}
	return true;
}
275
+
276
/*
 * Used to iterate through ranges of set bits.
 *
 * Tries to find the next contiguous sequence of set bits with a first index >=
 * start. If one exists, puts the earliest bit of the range in *r_begin, its
 * length in *r_len, and returns true. Otherwise, returns false (without
 * touching *r_begin or *r_len).
 */
static inline bool
fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ true, /* forward */ true);
}

/*
 * The same as fb_srange_iter, but searches backwards from start rather than
 * forwards. (The position returned is still the earliest bit in the range).
 */
static inline bool
fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ true, /* forward */ false);
}

/* Similar to fb_srange_iter, but searches for unset bits. */
static inline bool
fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ false, /* forward */ true);
}

/* Similar to fb_srange_riter, but searches for unset bits. */
static inline bool
fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ false, /* forward */ false);
}
317
+
318
+ JEMALLOC_ALWAYS_INLINE size_t
319
+ fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
320
+ size_t begin = 0;
321
+ size_t longest_len = 0;
322
+ size_t len = 0;
323
+ while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
324
+ &len, val, /* forward */ true)) {
325
+ if (len > longest_len) {
326
+ longest_len = len;
327
+ }
328
+ begin += len;
329
+ }
330
+ return longest_len;
331
+ }
332
+
333
+ static inline size_t
334
+ fb_srange_longest(fb_group_t *fb, size_t nbits) {
335
+ return fb_range_longest_impl(fb, nbits, /* val */ true);
336
+ }
337
+
338
+ static inline size_t
339
+ fb_urange_longest(fb_group_t *fb, size_t nbits) {
340
+ return fb_range_longest_impl(fb, nbits, /* val */ false);
341
+ }
342
+
343
+ /*
344
+ * Initializes each bit of dst with the bitwise-AND of the corresponding bits of
345
+ * src1 and src2. All bitmaps must be the same size.
346
+ */
347
+ static inline void
348
+ fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
349
+ size_t ngroups = FB_NGROUPS(nbits);
350
+ for (size_t i = 0; i < ngroups; i++) {
351
+ dst[i] = src1[i] & src2[i];
352
+ }
353
+ }
354
+
355
+ /* Like fb_bit_and, but with bitwise-OR. */
356
+ static inline void
357
+ fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
358
+ size_t ngroups = FB_NGROUPS(nbits);
359
+ for (size_t i = 0; i < ngroups; i++) {
360
+ dst[i] = src1[i] | src2[i];
361
+ }
362
+ }
363
+
364
+ /* Initializes dst bit i to the negation of source bit i. */
365
+ static inline void
366
+ fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
367
+ size_t ngroups = FB_NGROUPS(nbits);
368
+ for (size_t i = 0; i < ngroups; i++) {
369
+ dst[i] = ~src[i];
370
+ }
371
+ }
372
+
373
+ #endif /* JEMALLOC_INTERNAL_FB_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/fxp.h ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef JEMALLOC_INTERNAL_FXP_H
#define JEMALLOC_INTERNAL_FXP_H

/*
 * Minimal unsigned fixed-point arithmetic; overflow is a caller error rather
 * than a checked condition.
 *
 * Floating point is off-limits in core allocator code: some libc
 * implementations assume malloc never touches FP state and may invoke it with
 * an unusual calling convention.
 */

/*
 * 16.16 format: the high 16 bits hold the integer part and the low 16 bits
 * the fraction, i.e. repr == val * 2**16, where "val" is the (imaginary)
 * fractional value being represented.
 *
 * uint32_t is chosen so that doubling the representation width (needed for
 * multiply/divide intermediates) lands on uint64_t, the widest type we can
 * count on having.
 */
typedef uint32_t fxp_t;
#define FXP_INIT_INT(x) ((x) << 16)
#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)

/*
 * Digits of precision used when parsing and printing. The integer part gets
 * 16 bits and so is bounded by 65536, hence 5 digits. The fractional side
 * keeps many digits (most get rounded away) so that small powers of two stay
 * exact -- e.g. 1/512 == 0.001953125 is representable even with only 16
 * fractional bits. We must be able to left-shift by 16 before dividing by
 * 10**precision, which bounds precision at floor(log10(2**48)) = 14.
 */
#define FXP_INTEGER_PART_DIGITS 5
#define FXP_FRACTIONAL_PART_DIGITS 14

/*
 * Buffer size for printing: all the digits, plus (possibly) a decimal point
 * and the terminating null character.
 */
#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)

static inline fxp_t
fxp_add(fxp_t a, fxp_t b) {
	return a + b;
}

static inline fxp_t
fxp_sub(fxp_t a, fxp_t b) {
	assert(a >= b);
	return a - b;
}

static inline fxp_t
fxp_mul(fxp_t a, fxp_t b) {
	/*
	 * (a.val * 2**16) * (b.val * 2**16) == (a.val * b.val) * 2**32; a
	 * right shift of 16 lands us back on the 2**16 representation.
	 */
	uint64_t prod = (uint64_t)a * (uint64_t)b;
	return (fxp_t)(prod >> 16);
}

static inline fxp_t
fxp_div(fxp_t a, fxp_t b) {
	assert(b != 0);
	/*
	 * ((a.val * 2**16) << 32) / (b.val * 2**16) == (a.val / b.val) *
	 * 2**32; as in fxp_mul, a final right shift of 16 restores the
	 * format.
	 */
	uint64_t quot = ((uint64_t)a << 32) / (uint64_t)b;
	return (fxp_t)(quot >> 16);
}

/* Truncates toward zero. */
static inline uint32_t
fxp_round_down(fxp_t a) {
	return a >> 16;
}

/* Rounds to the nearest integer; ties (exactly .5) round up. */
static inline uint32_t
fxp_round_nearest(fxp_t a) {
	uint32_t frac = a & ((1U << 16) - 1);
	uint32_t carry = (uint32_t)(frac >= (1U << 15));
	return (a >> 16) + carry;
}

/*
 * Approximately computes x * frac without first converting x to an fxp_t,
 * which would cap it at 16 integer bits.
 */
static inline size_t
fxp_mul_frac(size_t x_orig, fxp_t frac) {
	assert(frac <= (1U << 16));
	/*
	 * Widen explicitly; this also sidesteps an over-enthusiastic
	 * type-limits warning on 32-bit platforms, where any size_t is
	 * trivially below 1ULL << 48.
	 */
	uint64_t x = (uint64_t)x_orig;
	if (x < (1ULL << 48)) {
		/* No overflow possible: multiply first to keep precision. */
		return (size_t)((x * frac) >> 16);
	}
	/*
	 * Otherwise shift first, then multiply. Only the low 16 bits of a
	 * 48+-bit quantity are dropped, so the result is still accurate to
	 * within 1/2**32.
	 */
	return (size_t)((x >> 16) * (uint64_t)frac);
}

/*
 * Returns true on error. Otherwise, returns false and updates *end to point
 * to the first character not parsed (because it wasn't a digit).
 */
bool fxp_parse(fxp_t *a, const char *ptr, char **end);
void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);

#endif /* JEMALLOC_INTERNAL_FXP_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hash.h ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HASH_H
2
+ #define JEMALLOC_INTERNAL_HASH_H
3
+
4
+ #include "jemalloc/internal/assert.h"
5
+
6
+ /*
7
+ * The following hash function is based on MurmurHash3, placed into the public
8
+ * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
9
+ * details.
10
+ */
11
+
12
+ /******************************************************************************/
13
+ /* Internal implementation. */
14
/* Rotates x left by r bits; callers pass 0 < r < 32. */
static inline uint32_t
hash_rotl_32(uint32_t x, int8_t r) {
	uint32_t hi = x << r;
	uint32_t lo = x >> (32 - r);
	return hi | lo;
}
18
+
19
/* Rotates x left by r bits; callers pass 0 < r < 64. */
static inline uint64_t
hash_rotl_64(uint64_t x, int8_t r) {
	uint64_t hi = x << r;
	uint64_t lo = x >> (64 - r);
	return hi | lo;
}
23
+
24
/*
 * Reads the i'th 32-bit block at p, tolerating misaligned p.
 *
 * The alignment predicate is fully wrapped in unlikely(): the original code
 * placed "!= 0" outside the hint macro, which evaluated the same condition
 * but applied the branch-prediction hint to the raw masked address rather
 * than the comparison. Behavior is unchanged; the hint now covers the
 * intended expression.
 */
static inline uint32_t
hash_get_block_32(const uint32_t *p, int i) {
	/* Handle unaligned read via memcpy (avoids a misaligned load). */
	if (unlikely(((uintptr_t)p & (sizeof(uint32_t)-1)) != 0)) {
		uint32_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
		return ret;
	}

	return p[i];
}
36
+
37
/*
 * Reads the i'th 64-bit block at p, tolerating misaligned p.
 *
 * As with hash_get_block_32, the "!= 0" comparison is moved inside
 * unlikely() so the branch hint covers the whole predicate; the evaluated
 * condition is unchanged.
 */
static inline uint64_t
hash_get_block_64(const uint64_t *p, int i) {
	/* Handle unaligned read via memcpy (avoids a misaligned load). */
	if (unlikely(((uintptr_t)p & (sizeof(uint64_t)-1)) != 0)) {
		uint64_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
		return ret;
	}

	return p[i];
}
49
+
50
/*
 * MurmurHash3 32-bit finalization mix: xorshift/multiply rounds that
 * avalanche the state so every input bit influences every output bit.
 */
static inline uint32_t
hash_fmix_32(uint32_t h) {
	h = (h ^ (h >> 16)) * 0x85ebca6b;
	h = (h ^ (h >> 13)) * 0xc2b2ae35;
	return h ^ (h >> 16);
}
60
+
61
/* MurmurHash3 64-bit finalization mix (avalanche); see hash_fmix_32. */
static inline uint64_t
hash_fmix_64(uint64_t k) {
	k = (k ^ (k >> 33)) * KQU(0xff51afd7ed558ccd);
	k = (k ^ (k >> 33)) * KQU(0xc4ceb9fe1a85ec53);
	return k ^ (k >> 33);
}
71
+
72
/*
 * MurmurHash3, x86 32-bit variant: hashes `len` bytes at `key` with `seed`
 * and returns a 32-bit result. Processes the input four bytes at a time,
 * then mixes in any trailing 1-3 bytes.
 */
static inline uint32_t
hash_x86_32(const void *key, int len, uint32_t seed) {
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	/* body */
	{
		/*
		 * blocks points one past the last full block; i runs from
		 * -nblocks up to 0, so hash_get_block_32(blocks, i) walks the
		 * blocks front to back.
		 */
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i);

			k1 *= c1;
			k1 = hash_rotl_32(k1, 15);
			k1 *= c2;

			h1 ^= k1;
			h1 = hash_rotl_32(h1, 13);
			h1 = h1*5 + 0xe6546b64;
		}
	}

	/* tail: fold in the 1-3 leftover bytes; fallthrough is deliberate. */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

		uint32_t k1 = 0;

		switch (len & 3) {
		case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
		case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
			k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization: mix in the length, then avalanche. */
	h1 ^= len;

	h1 = hash_fmix_32(h1);

	return h1;
}
121
+
122
/*
 * MurmurHash3, x86 128-bit variant: hashes `len` bytes at `key` with `seed`
 * and writes the 128-bit result into r_out[2]. Maintains four 32-bit lanes
 * (h1..h4), consuming 16 input bytes per iteration.
 */
static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2]) {
	const uint8_t * data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint32_t h1 = seed;
	uint32_t h2 = seed;
	uint32_t h3 = seed;
	uint32_t h4 = seed;

	const uint32_t c1 = 0x239b961b;
	const uint32_t c2 = 0xab0e9789;
	const uint32_t c3 = 0x38b34ae5;
	const uint32_t c4 = 0xa1e38b93;

	/* body */
	{
		/*
		 * blocks points one past the last full 16-byte block; i runs
		 * from -nblocks up to 0 so the blocks are read front to back.
		 */
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_32(h1, 19); h1 += h2;
			h1 = h1*5 + 0x561ccd1b;

			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

			h2 = hash_rotl_32(h2, 17); h2 += h3;
			h2 = h2*5 + 0x0bcaa747;

			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

			h3 = hash_rotl_32(h3, 15); h3 += h4;
			h3 = h3*5 + 0x96cd1c35;

			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

			h4 = hash_rotl_32(h4, 13); h4 += h1;
			h4 = h4*5 + 0x32ac3b17;
		}
	}

	/* tail: fold in the 1-15 leftover bytes; fallthrough is deliberate. */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
		uint32_t k1 = 0;
		uint32_t k2 = 0;
		uint32_t k3 = 0;
		uint32_t k4 = 0;

		switch (len & 15) {
		case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
		case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
		case 13: k4 ^= tail[12] << 0;
			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
			JEMALLOC_FALLTHROUGH;
		case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
		case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
		case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
		case 9: k3 ^= tail[ 8] << 0;
			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
			JEMALLOC_FALLTHROUGH;
		case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
		case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
		case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
		case 5: k2 ^= tail[ 4] << 0;
			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
			JEMALLOC_FALLTHROUGH;
		case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
		case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
		case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
		case 1: k1 ^= tail[ 0] << 0;
			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
			break;
		}
	}

	/* finalization: mix in length, cross-mix lanes, avalanche, cross-mix. */
	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	h1 = hash_fmix_32(h1);
	h2 = hash_fmix_32(h2);
	h3 = hash_fmix_32(h3);
	h4 = hash_fmix_32(h4);

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	/* Pack the four 32-bit lanes into two 64-bit outputs. */
	r_out[0] = (((uint64_t) h2) << 32) | h1;
	r_out[1] = (((uint64_t) h4) << 32) | h3;
}
223
+
224
/*
 * MurmurHash3, x64 128-bit variant: hashes `len` bytes at `key` with `seed`
 * and writes the 128-bit result into r_out[2]. Maintains two 64-bit lanes
 * (h1, h2), consuming 16 input bytes per iteration.
 */
static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2]) {
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint64_t h1 = seed;
	uint64_t h2 = seed;

	const uint64_t c1 = KQU(0x87c37b91114253d5);
	const uint64_t c2 = KQU(0x4cf5ad432745937f);

	/* body: unlike the x86 variants, blocks are indexed forward here. */
	{
		const uint64_t *blocks = (const uint64_t *) (data);
		int i;

		for (i = 0; i < nblocks; i++) {
			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_64(h1, 27); h1 += h2;
			h1 = h1*5 + 0x52dce729;

			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

			h2 = hash_rotl_64(h2, 31); h2 += h1;
			h2 = h2*5 + 0x38495ab5;
		}
	}

	/* tail: fold in the 1-15 leftover bytes; fallthrough is deliberate. */
	{
		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
		uint64_t k1 = 0;
		uint64_t k2 = 0;

		switch (len & 15) {
		case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
		case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
		case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
		case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
		case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
		case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
			JEMALLOC_FALLTHROUGH;
		case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
		case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
		case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
		case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
		case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
		case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
		case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
		case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
			break;
		}
	}

	/* finalization: mix in length, cross-mix lanes, avalanche, cross-mix. */
	h1 ^= len; h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = hash_fmix_64(h1);
	h2 = hash_fmix_64(h2);

	h1 += h2;
	h2 += h1;

	r_out[0] = h1;
	r_out[1] = h2;
}
301
+
302
+ /******************************************************************************/
303
+ /* API. */
304
/*
 * Public entry point: hashes `len` bytes at `key` into the two size_t words
 * of r_hash, selecting the MurmurHash3 variant at compile time.
 */
static inline void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
	/*
	 * 64-bit little-endian: size_t is 64 bits wide (LG_SIZEOF_PTR == 3),
	 * so the two 64-bit lanes can be written straight into r_hash.
	 */
	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
	{
		uint64_t hashes[2];
		hash_x86_128(key, (int)len, seed, hashes);
		/* On 32-bit platforms these casts keep the low halves. */
		r_hash[0] = (size_t)hashes[0];
		r_hash[1] = (size_t)hashes[1];
	}
#endif
}
319
+
320
+ #endif /* JEMALLOC_INTERNAL_HASH_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hook.h ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HOOK_H
2
+ #define JEMALLOC_INTERNAL_HOOK_H
3
+
4
+ #include "jemalloc/internal/tsd.h"
5
+
6
+ /*
7
+ * This API is *extremely* experimental, and may get ripped out, changed in API-
8
+ * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
9
+ *
10
+ * It allows hooking the stateful parts of the API to see changes as they
11
+ * happen.
12
+ *
13
+ * Allocation hooks are called after the allocation is done, free hooks are
14
+ * called before the free is done, and expand hooks are called after the
15
+ * allocation is expanded.
16
+ *
17
+ * For realloc and rallocx, if the expansion happens in place, the expansion
18
+ * hook is called. If it is moved, then the alloc hook is called on the new
19
+ * location, and then the free hook is called on the old location (i.e. both
20
+ * hooks are invoked in between the alloc and the dalloc).
21
+ *
22
+ * If we return NULL from OOM, then usize might not be trustworthy. Calling
23
+ * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
24
+ * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
25
+ * and only calls the alloc hook).
26
+ *
27
+ * Reentrancy:
28
+ * Reentrancy is guarded against from within the hook implementation. If you
29
+ * call allocator functions from within a hook, the hooks will not be invoked
30
+ * again.
31
+ * Threading:
32
+ * The installation of a hook synchronizes with all its uses. If you can
33
+ * prove the installation of a hook happens-before a jemalloc entry point,
34
+ * then the hook will get invoked (unless there's a racing removal).
35
+ *
36
+ * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
37
+ * allocates and has the alloc hook invoked, then a subsequent free on the
38
+ * same thread will also have the free hook invoked).
39
+ *
40
+ * The *removal* of a hook does *not* block until all threads are done with
41
+ * the hook. Hook authors have to be resilient to this, and need some
42
+ * out-of-band mechanism for cleaning up any dynamically allocated memory
43
+ * associated with their hook.
44
+ * Ordering:
45
+ * Order of hook execution is unspecified, and may be different than insertion
46
+ * order.
47
+ */
48
+
49
+ #define HOOK_MAX 4
50
+
51
+ enum hook_alloc_e {
52
+ hook_alloc_malloc,
53
+ hook_alloc_posix_memalign,
54
+ hook_alloc_aligned_alloc,
55
+ hook_alloc_calloc,
56
+ hook_alloc_memalign,
57
+ hook_alloc_valloc,
58
+ hook_alloc_mallocx,
59
+
60
+ /* The reallocating functions have both alloc and dalloc variants */
61
+ hook_alloc_realloc,
62
+ hook_alloc_rallocx,
63
+ };
64
+ /*
65
+ * We put the enum typedef after the enum, since this file may get included by
66
+ * jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
67
+ */
68
+ typedef enum hook_alloc_e hook_alloc_t;
69
+
70
+ enum hook_dalloc_e {
71
+ hook_dalloc_free,
72
+ hook_dalloc_dallocx,
73
+ hook_dalloc_sdallocx,
74
+
75
+ /*
76
+ * The dalloc halves of reallocation (not called if in-place expansion
77
+ * happens).
78
+ */
79
+ hook_dalloc_realloc,
80
+ hook_dalloc_rallocx,
81
+ };
82
+ typedef enum hook_dalloc_e hook_dalloc_t;
83
+
84
+
85
+ enum hook_expand_e {
86
+ hook_expand_realloc,
87
+ hook_expand_rallocx,
88
+ hook_expand_xallocx,
89
+ };
90
+ typedef enum hook_expand_e hook_expand_t;
91
+
92
+ typedef void (*hook_alloc)(
93
+ void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
94
+ uintptr_t args_raw[3]);
95
+
96
+ typedef void (*hook_dalloc)(
97
+ void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
98
+
99
+ typedef void (*hook_expand)(
100
+ void *extra, hook_expand_t type, void *address, size_t old_usize,
101
+ size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
102
+
103
typedef struct hooks_s hooks_t;
/*
 * A bundle of callbacks to install via hook_install(), plus a caller-owned
 * context pointer. Per the file comment above: alloc hooks run after an
 * allocation completes, dalloc hooks run before a free, and expand hooks run
 * after an in-place expansion.
 */
struct hooks_s {
	hook_alloc alloc_hook;
	hook_dalloc dalloc_hook;
	hook_expand expand_hook;
	/* Opaque pointer handed back as the first argument of every hook. */
	void *extra;
};
110
+
111
+ /*
112
+ * Begin implementation details; everything above this point might one day live
113
+ * in a public API. Everything below this point never will.
114
+ */
115
+
116
+ /*
117
+ * The realloc pathways haven't gotten any refactoring love in a while, and it's
118
+ * fairly difficult to pass information from the entry point to the hooks. We
119
+ * put the informaiton the hooks will need into a struct to encapsulate
120
+ * everything.
121
+ *
122
+ * Much of these pathways are force-inlined, so that the compiler can avoid
123
+ * materializing this struct until we hit an extern arena function. For fairly
124
+ * goofy reasons, *many* of the realloc paths hit an extern arena function.
125
+ * These paths are cold enough that it doesn't matter; eventually, we should
126
+ * rewrite the realloc code to make the expand-in-place and the
127
+ * free-then-realloc paths more orthogonal, at which point we don't need to
128
+ * spread the hook logic all over the place.
129
+ */
130
+ typedef struct hook_ralloc_args_s hook_ralloc_args_t;
131
+ struct hook_ralloc_args_s {
132
+ /* I.e. as opposed to rallocx. */
133
+ bool is_realloc;
134
+ /*
135
+ * The expand hook takes 4 arguments, even if only 3 are actually used;
136
+ * we add an extra one in case the user decides to memcpy without
137
+ * looking too closely at the hooked function.
138
+ */
139
+ uintptr_t args[4];
140
+ };
141
+
142
+ /*
143
+ * Returns an opaque handle to be used when removing the hook. NULL means that
144
+ * we couldn't install the hook.
145
+ */
146
+ bool hook_boot();
147
+
148
+ void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
149
+ /* Uninstalls the hook with the handle previously returned from hook_install. */
150
+ void hook_remove(tsdn_t *tsdn, void *opaque);
151
+
152
+ /* Hooks */
153
+
154
+ void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
155
+ uintptr_t args_raw[3]);
156
+
157
+ void hook_invoke_dalloc(hook_dalloc_t type, void *address,
158
+ uintptr_t args_raw[3]);
159
+
160
+ void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
161
+ size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
162
+
163
+ #endif /* JEMALLOC_INTERNAL_HOOK_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa.h ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HPA_H
2
+ #define JEMALLOC_INTERNAL_HPA_H
3
+
4
+ #include "jemalloc/internal/exp_grow.h"
5
+ #include "jemalloc/internal/hpa_hooks.h"
6
+ #include "jemalloc/internal/hpa_opts.h"
7
+ #include "jemalloc/internal/pai.h"
8
+ #include "jemalloc/internal/psset.h"
9
+
10
+ typedef struct hpa_central_s hpa_central_t;
11
+ struct hpa_central_s {
12
+ /*
13
+ * The mutex guarding most of the operations on the central data
14
+ * structure.
15
+ */
16
+ malloc_mutex_t mtx;
17
+ /*
18
+ * Guards expansion of eden. We separate this from the regular mutex so
19
+ * that cheaper operations can still continue while we're doing the OS
20
+ * call.
21
+ */
22
+ malloc_mutex_t grow_mtx;
23
+ /*
24
+ * Either NULL (if empty), or some integer multiple of a
25
+ * hugepage-aligned number of hugepages. We carve them off one at a
26
+ * time to satisfy new pageslab requests.
27
+ *
28
+ * Guarded by grow_mtx.
29
+ */
30
+ void *eden;
31
+ size_t eden_len;
32
+ /* Source for metadata. */
33
+ base_t *base;
34
+ /* Number of grow operations done on this hpa_central_t. */
35
+ uint64_t age_counter;
36
+
37
+ /* The HPA hooks. */
38
+ hpa_hooks_t hooks;
39
+ };
40
+
41
+ typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
42
+ struct hpa_shard_nonderived_stats_s {
43
+ /*
44
+ * The number of times we've purged within a hugepage.
45
+ *
46
+ * Guarded by mtx.
47
+ */
48
+ uint64_t npurge_passes;
49
+ /*
50
+ * The number of individual purge calls we perform (which should always
51
+ * be bigger than npurge_passes, since each pass purges at least one
52
+ * extent within a hugepage.
53
+ *
54
+ * Guarded by mtx.
55
+ */
56
+ uint64_t npurges;
57
+
58
+ /*
59
+ * The number of times we've hugified a pageslab.
60
+ *
61
+ * Guarded by mtx.
62
+ */
63
+ uint64_t nhugifies;
64
+ /*
65
+ * The number of times we've dehugified a pageslab.
66
+ *
67
+ * Guarded by mtx.
68
+ */
69
+ uint64_t ndehugifies;
70
+ };
71
+
72
+ /* Completely derived; only used by CTL. */
73
+ typedef struct hpa_shard_stats_s hpa_shard_stats_t;
74
+ struct hpa_shard_stats_s {
75
+ psset_stats_t psset_stats;
76
+ hpa_shard_nonderived_stats_t nonderived_stats;
77
+ };
78
+
79
+ typedef struct hpa_shard_s hpa_shard_t;
80
+ struct hpa_shard_s {
81
+ /*
82
+ * pai must be the first member; we cast from a pointer to it to a
83
+ * pointer to the hpa_shard_t.
84
+ */
85
+ pai_t pai;
86
+
87
+ /* The central allocator we get our hugepages from. */
88
+ hpa_central_t *central;
89
+ /* Protects most of this shard's state. */
90
+ malloc_mutex_t mtx;
91
+ /*
92
+ * Guards the shard's access to the central allocator (preventing
93
+ * multiple threads operating on this shard from accessing the central
94
+ * allocator).
95
+ */
96
+ malloc_mutex_t grow_mtx;
97
+ /* The base metadata allocator. */
98
+ base_t *base;
99
+
100
+ /*
101
+ * This edata cache is the one we use when allocating a small extent
102
+ * from a pageslab. The pageslab itself comes from the centralized
103
+ * allocator, and so will use its edata_cache.
104
+ */
105
+ edata_cache_fast_t ecf;
106
+
107
+ psset_t psset;
108
+
109
+ /*
110
+ * How many grow operations have occurred.
111
+ *
112
+ * Guarded by grow_mtx.
113
+ */
114
+ uint64_t age_counter;
115
+
116
+ /* The arena ind we're associated with. */
117
+ unsigned ind;
118
+
119
+ /*
120
+ * Our emap. This is just a cache of the emap pointer in the associated
121
+ * hpa_central.
122
+ */
123
+ emap_t *emap;
124
+
125
+ /* The configuration choices for this hpa shard. */
126
+ hpa_shard_opts_t opts;
127
+
128
+ /*
129
+ * How many pages have we started but not yet finished purging in this
130
+ * hpa shard.
131
+ */
132
+ size_t npending_purge;
133
+
134
+ /*
135
+ * Those stats which are copied directly into the CTL-centric hpa shard
136
+ * stats.
137
+ */
138
+ hpa_shard_nonderived_stats_t stats;
139
+
140
+ /*
141
+ * Last time we performed purge on this shard.
142
+ */
143
+ nstime_t last_purge;
144
+ };
145
+
146
/*
 * Whether or not the HPA can be used given the current configuration. This
 * is not necessarily a guarantee that it backs its allocations by hugepages,
 * just that it can function properly given the system it's running on.
 */
bool hpa_supported(void);
152
+ bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
153
+ bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
154
+ base_t *base, edata_cache_t *edata_cache, unsigned ind,
155
+ const hpa_shard_opts_t *opts);
156
+
157
+ void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
158
+ void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
159
+ hpa_shard_stats_t *dst);
160
+
161
+ /*
162
+ * Notify the shard that we won't use it for allocations much longer. Due to
163
+ * the possibility of races, we don't actually prevent allocations; just flush
164
+ * and disable the embedded edata_cache_small.
165
+ */
166
+ void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
167
+ void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
168
+
169
+ void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
170
+ bool deferral_allowed);
171
+ void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
172
+
173
+ /*
174
+ * We share the fork ordering with the PA and arena prefork handling; that's why
175
+ * these are 3 and 4 rather than 0 and 1.
176
+ */
177
+ void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
178
+ void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
179
+ void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
180
+ void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
181
+
182
+ #endif /* JEMALLOC_INTERNAL_HPA_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa_hooks.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
2
+ #define JEMALLOC_INTERNAL_HPA_HOOKS_H
3
+
4
typedef struct hpa_hooks_s hpa_hooks_t;
/*
 * Platform operations the HPA depends on, indirected through a hook table so
 * that alternate implementations can be substituted (presumably for testing;
 * the default set lives in hpa_hooks_default).
 */
struct hpa_hooks_s {
	/* Maps size bytes of virtual address space. */
	void *(*map)(size_t size);
	/* Unmaps a size-byte region previously returned from map. */
	void (*unmap)(void *ptr, size_t size);
	/* Discards the contents of the given page range (madvise-style — TODO confirm). */
	void (*purge)(void *ptr, size_t size);
	/* Requests hugepage backing for the range. */
	void (*hugify)(void *ptr, size_t size);
	/* Reverses hugify for the range. */
	void (*dehugify)(void *ptr, size_t size);
	/* Reads the current time into *r_time; first_reading semantics — TODO confirm. */
	void (*curtime)(nstime_t *r_time, bool first_reading);
	/* Milliseconds elapsed since the instant recorded in *r_time. */
	uint64_t (*ms_since)(nstime_t *r_time);
};
14
+
15
+ extern hpa_hooks_t hpa_hooks_default;
16
+
17
+ #endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpa_opts.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
2
+ #define JEMALLOC_INTERNAL_HPA_OPTS_H
3
+
4
+ #include "jemalloc/internal/fxp.h"
5
+
6
+ /*
7
+ * This file is morally part of hpa.h, but is split out for header-ordering
8
+ * reasons.
9
+ */
10
+
11
+ typedef struct hpa_shard_opts_s hpa_shard_opts_t;
12
+ struct hpa_shard_opts_s {
13
+ /*
14
+ * The largest size we'll allocate out of the shard. For those
15
+ * allocations refused, the caller (in practice, the PA module) will
16
+ * fall back to the more general (for now) PAC, which can always handle
17
+ * any allocation request.
18
+ */
19
+ size_t slab_max_alloc;
20
+
21
+ /*
22
+ * When the number of active bytes in a hugepage is >=
23
+ * hugification_threshold, we force hugify it.
24
+ */
25
+ size_t hugification_threshold;
26
+
27
+ /*
28
+ * The HPA purges whenever the number of pages exceeds dirty_mult *
29
+ * active_pages. This may be set to (fxp_t)-1 to disable purging.
30
+ */
31
+ fxp_t dirty_mult;
32
+
33
+ /*
34
+ * Whether or not the PAI methods are allowed to defer work to a
35
+ * subsequent hpa_shard_do_deferred_work() call. Practically, this
36
+ * corresponds to background threads being enabled. We track this
37
+ * ourselves for encapsulation purposes.
38
+ */
39
+ bool deferral_allowed;
40
+
41
+ /*
42
+ * How long a hugepage has to be a hugification candidate before it will
43
+ * actually get hugified.
44
+ */
45
+ uint64_t hugify_delay_ms;
46
+
47
+ /*
48
+ * Minimum amount of time between purges.
49
+ */
50
+ uint64_t min_purge_interval_ms;
51
+ };
52
+
53
+ #define HPA_SHARD_OPTS_DEFAULT { \
54
+ /* slab_max_alloc */ \
55
+ 64 * 1024, \
56
+ /* hugification_threshold */ \
57
+ HUGEPAGE * 95 / 100, \
58
+ /* dirty_mult */ \
59
+ FXP_INIT_PERCENT(25), \
60
+ /* \
61
+ * deferral_allowed \
62
+ * \
63
+ * Really, this is always set by the arena during creation \
64
+ * or by an hpa_shard_set_deferral_allowed call, so the value \
65
+ * we put here doesn't matter. \
66
+ */ \
67
+ false, \
68
+ /* hugify_delay_ms */ \
69
+ 10 * 1000, \
70
+ /* min_purge_interval_ms */ \
71
+ 5 * 1000 \
72
+ }
73
+
74
+ #endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/hpdata.h ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_HPDATA_H
2
+ #define JEMALLOC_INTERNAL_HPDATA_H
3
+
4
+ #include "jemalloc/internal/fb.h"
5
+ #include "jemalloc/internal/ph.h"
6
+ #include "jemalloc/internal/ql.h"
7
+ #include "jemalloc/internal/typed_list.h"
8
+
9
+ /*
10
+ * The metadata representation we use for extents in hugepages. While the PAC
11
+ * uses the edata_t to represent both active and inactive extents, the HP only
12
+ * uses the edata_t for active ones; instead, inactive extent state is tracked
13
+ * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
14
+ * region of virtual address space.
15
+ *
16
+ * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
17
+ * an observable property of any given region of address space). It's just
18
+ * hugepage-sized and hugepage-aligned; it's *potentially* huge.
19
+ */
20
+ typedef struct hpdata_s hpdata_t;
21
+ ph_structs(hpdata_age_heap, hpdata_t);
22
+ struct hpdata_s {
23
+ /*
24
+ * We likewise follow the edata convention of mangling names and forcing
25
+ * the use of accessors -- this lets us add some consistency checks on
26
+ * access.
27
+ */
28
+
29
+ /*
30
+ * The address of the hugepage in question. This can't be named h_addr,
31
+ * since that conflicts with a macro defined in Windows headers.
32
+ */
33
+ void *h_address;
34
+ /* Its age (measured in psset operations). */
35
+ uint64_t h_age;
36
+ /* Whether or not we think the hugepage is mapped that way by the OS. */
37
+ bool h_huge;
38
+
39
+ /*
40
+ * For some properties, we keep parallel sets of bools; h_foo_allowed
41
+ * and h_in_psset_foo_container. This is a decoupling mechanism to
42
+ * avoid bothering the hpa (which manages policies) from the psset
43
+ * (which is the mechanism used to enforce those policies). This allows
44
+ * all the container management logic to live in one place, without the
45
+ * HPA needing to know or care how that happens.
46
+ */
47
+
48
+ /*
49
+ * Whether or not the hpdata is allowed to be used to serve allocations,
50
+ * and whether or not the psset is currently tracking it as such.
51
+ */
52
+ bool h_alloc_allowed;
53
+ bool h_in_psset_alloc_container;
54
+
55
+ /*
56
+ * The same, but with purging. There's no corresponding
57
+ * h_in_psset_purge_container, because the psset (currently) always
58
+ * removes hpdatas from their containers during updates (to implement
59
+ * LRU for purging).
60
+ */
61
+ bool h_purge_allowed;
62
+
63
+ /* And with hugifying. */
64
+ bool h_hugify_allowed;
65
+ /* When we became a hugification candidate. */
66
+ nstime_t h_time_hugify_allowed;
67
+ bool h_in_psset_hugify_container;
68
+
69
+ /* Whether or not a purge or hugify is currently happening. */
70
+ bool h_mid_purge;
71
+ bool h_mid_hugify;
72
+
73
+ /*
74
+ * Whether or not the hpdata is being updated in the psset (i.e. if
75
+ * there has been a psset_update_begin call issued without a matching
76
+ * psset_update_end call). Eventually this will expand to other types
77
+ * of updates.
78
+ */
79
+ bool h_updating;
80
+
81
+ /* Whether or not the hpdata is in a psset. */
82
+ bool h_in_psset;
83
+
84
+ union {
85
+ /* When nonempty (and also nonfull), used by the psset bins. */
86
+ hpdata_age_heap_link_t age_link;
87
+ /*
88
+ * When empty (or not corresponding to any hugepage), list
89
+ * linkage.
90
+ */
91
+ ql_elm(hpdata_t) ql_link_empty;
92
+ };
93
+
94
+ /*
95
+ * Linkage for the psset to track candidates for purging and hugifying.
96
+ */
97
+ ql_elm(hpdata_t) ql_link_purge;
98
+ ql_elm(hpdata_t) ql_link_hugify;
99
+
100
+ /* The length of the largest contiguous sequence of inactive pages. */
101
+ size_t h_longest_free_range;
102
+
103
+ /* Number of active pages. */
104
+ size_t h_nactive;
105
+
106
+ /* A bitmap with bits set in the active pages. */
107
+ fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
108
+
109
+ /*
110
+ * Number of dirty or active pages, and a bitmap tracking them. One
111
+ * way to think of this is as which pages are dirty from the OS's
112
+ * perspective.
113
+ */
114
+ size_t h_ntouched;
115
+
116
+ /* The touched pages (using the same definition as above). */
117
+ fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
118
+ };
119
+
120
+ TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
121
+ TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
122
+ TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
123
+
124
+ ph_proto(, hpdata_age_heap, hpdata_t);
125
+
126
+ static inline void *
127
+ hpdata_addr_get(const hpdata_t *hpdata) {
128
+ return hpdata->h_address;
129
+ }
130
+
131
+ static inline void
132
+ hpdata_addr_set(hpdata_t *hpdata, void *addr) {
133
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
134
+ hpdata->h_address = addr;
135
+ }
136
+
137
+ static inline uint64_t
138
+ hpdata_age_get(const hpdata_t *hpdata) {
139
+ return hpdata->h_age;
140
+ }
141
+
142
+ static inline void
143
+ hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
144
+ hpdata->h_age = age;
145
+ }
146
+
147
+ static inline bool
148
+ hpdata_huge_get(const hpdata_t *hpdata) {
149
+ return hpdata->h_huge;
150
+ }
151
+
152
+ static inline bool
153
+ hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
154
+ return hpdata->h_alloc_allowed;
155
+ }
156
+
157
+ static inline void
158
+ hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
159
+ hpdata->h_alloc_allowed = alloc_allowed;
160
+ }
161
+
162
+ static inline bool
163
+ hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
164
+ return hpdata->h_in_psset_alloc_container;
165
+ }
166
+
167
+ static inline void
168
+ hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
169
+ assert(in_container != hpdata->h_in_psset_alloc_container);
170
+ hpdata->h_in_psset_alloc_container = in_container;
171
+ }
172
+
173
+ static inline bool
174
+ hpdata_purge_allowed_get(const hpdata_t *hpdata) {
175
+ return hpdata->h_purge_allowed;
176
+ }
177
+
178
+ static inline void
179
+ hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
180
+ assert(purge_allowed == false || !hpdata->h_mid_purge);
181
+ hpdata->h_purge_allowed = purge_allowed;
182
+ }
183
+
184
+ static inline bool
185
+ hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
186
+ return hpdata->h_hugify_allowed;
187
+ }
188
+
189
+ static inline void
190
+ hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
191
+ assert(!hpdata->h_mid_hugify);
192
+ hpdata->h_hugify_allowed = true;
193
+ hpdata->h_time_hugify_allowed = now;
194
+ }
195
+
196
+ static inline nstime_t
197
+ hpdata_time_hugify_allowed(hpdata_t *hpdata) {
198
+ return hpdata->h_time_hugify_allowed;
199
+ }
200
+
201
+ static inline void
202
+ hpdata_disallow_hugify(hpdata_t *hpdata) {
203
+ hpdata->h_hugify_allowed = false;
204
+ }
205
+
206
+ static inline bool
207
+ hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
208
+ return hpdata->h_in_psset_hugify_container;
209
+ }
210
+
211
+ static inline void
212
+ hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
213
+ assert(in_container != hpdata->h_in_psset_hugify_container);
214
+ hpdata->h_in_psset_hugify_container = in_container;
215
+ }
216
+
217
+ static inline bool
218
+ hpdata_mid_purge_get(const hpdata_t *hpdata) {
219
+ return hpdata->h_mid_purge;
220
+ }
221
+
222
+ static inline void
223
+ hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
224
+ assert(mid_purge != hpdata->h_mid_purge);
225
+ hpdata->h_mid_purge = mid_purge;
226
+ }
227
+
228
+ static inline bool
229
+ hpdata_mid_hugify_get(const hpdata_t *hpdata) {
230
+ return hpdata->h_mid_hugify;
231
+ }
232
+
233
+ static inline void
234
+ hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
235
+ assert(mid_hugify != hpdata->h_mid_hugify);
236
+ hpdata->h_mid_hugify = mid_hugify;
237
+ }
238
+
239
+ static inline bool
240
+ hpdata_changing_state_get(const hpdata_t *hpdata) {
241
+ return hpdata->h_mid_purge || hpdata->h_mid_hugify;
242
+ }
243
+
244
+
245
+ static inline bool
246
+ hpdata_updating_get(const hpdata_t *hpdata) {
247
+ return hpdata->h_updating;
248
+ }
249
+
250
+ static inline void
251
+ hpdata_updating_set(hpdata_t *hpdata, bool updating) {
252
+ assert(updating != hpdata->h_updating);
253
+ hpdata->h_updating = updating;
254
+ }
255
+
256
+ static inline bool
257
+ hpdata_in_psset_get(const hpdata_t *hpdata) {
258
+ return hpdata->h_in_psset;
259
+ }
260
+
261
+ static inline void
262
+ hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
263
+ assert(in_psset != hpdata->h_in_psset);
264
+ hpdata->h_in_psset = in_psset;
265
+ }
266
+
267
+ static inline size_t
268
+ hpdata_longest_free_range_get(const hpdata_t *hpdata) {
269
+ return hpdata->h_longest_free_range;
270
+ }
271
+
272
+ static inline void
273
+ hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
274
+ assert(longest_free_range <= HUGEPAGE_PAGES);
275
+ hpdata->h_longest_free_range = longest_free_range;
276
+ }
277
+
278
+ static inline size_t
279
+ hpdata_nactive_get(hpdata_t *hpdata) {
280
+ return hpdata->h_nactive;
281
+ }
282
+
283
+ static inline size_t
284
+ hpdata_ntouched_get(hpdata_t *hpdata) {
285
+ return hpdata->h_ntouched;
286
+ }
287
+
288
+ static inline size_t
289
+ hpdata_ndirty_get(hpdata_t *hpdata) {
290
+ return hpdata->h_ntouched - hpdata->h_nactive;
291
+ }
292
+
293
+ static inline size_t
294
+ hpdata_nretained_get(hpdata_t *hpdata) {
295
+ return HUGEPAGE_PAGES - hpdata->h_ntouched;
296
+ }
297
+
298
+ static inline void
299
+ hpdata_assert_empty(hpdata_t *hpdata) {
300
+ assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
301
+ assert(hpdata->h_nactive == 0);
302
+ }
303
+
304
+ /*
305
+ * Only used in tests, and in hpdata_assert_consistent, below. Verifies some
306
+ * consistency properties of the hpdata (e.g. that cached counts of page stats
307
+ * match computed ones).
308
+ */
309
+ static inline bool
310
+ hpdata_consistent(hpdata_t *hpdata) {
311
+ if(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
312
+ != hpdata_longest_free_range_get(hpdata)) {
313
+ return false;
314
+ }
315
+ if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
316
+ != hpdata->h_nactive) {
317
+ return false;
318
+ }
319
+ if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
320
+ != hpdata->h_ntouched) {
321
+ return false;
322
+ }
323
+ if (hpdata->h_ntouched < hpdata->h_nactive) {
324
+ return false;
325
+ }
326
+ if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
327
+ return false;
328
+ }
329
+ if (hpdata_changing_state_get(hpdata)
330
+ && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
331
+ return false;
332
+ }
333
+ if (hpdata_hugify_allowed_get(hpdata)
334
+ != hpdata_in_psset_hugify_container_get(hpdata)) {
335
+ return false;
336
+ }
337
+ return true;
338
+ }
339
+
340
+ static inline void
341
+ hpdata_assert_consistent(hpdata_t *hpdata) {
342
+ assert(hpdata_consistent(hpdata));
343
+ }
344
+
345
+ static inline bool
346
+ hpdata_empty(hpdata_t *hpdata) {
347
+ return hpdata->h_nactive == 0;
348
+ }
349
+
350
+ static inline bool
351
+ hpdata_full(hpdata_t *hpdata) {
352
+ return hpdata->h_nactive == HUGEPAGE_PAGES;
353
+ }
354
+
355
+ void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
356
+
357
+ /*
358
+ * Given an hpdata which can serve an allocation request, pick and reserve an
359
+ * offset within that allocation.
360
+ */
361
+ void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
362
+ void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
363
+
364
+ /*
365
+ * The hpdata_purge_prepare_t allows grabbing the metadata required to purge
366
+ * subranges of a hugepage while holding a lock, drop the lock during the actual
367
+ * purging of them, and reacquire it to update the metadata again.
368
+ */
369
+ typedef struct hpdata_purge_state_s hpdata_purge_state_t;
370
+ struct hpdata_purge_state_s {
371
+ size_t npurged;
372
+ size_t ndirty_to_purge;
373
+ fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
374
+ size_t next_purge_search_begin;
375
+ };
376
+
377
+ /*
378
+ * Initializes purge state. The access to hpdata must be externally
379
+ * synchronized with other hpdata_* calls.
380
+ *
381
+ * You can tell whether or not a thread is purging or hugifying a given hpdata
382
+ * via hpdata_changing_state_get(hpdata). Racing hugification or purging
383
+ * operations aren't allowed.
384
+ *
385
+ * Once you begin purging, you have to follow through and call hpdata_purge_next
386
+ * until you're done, and then end. Allocating out of an hpdata undergoing
387
+ * purging is not allowed.
388
+ *
389
+ * Returns the number of dirty pages that will be purged.
390
+ */
391
+ size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
392
+
393
+ /*
394
+ * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
395
+ * true, and returns true. Otherwise, returns false to indicate that we're
396
+ * done.
397
+ *
398
+ * This requires exclusive access to the purge state, but *not* to the hpdata.
399
+ * In particular, unreserve calls are allowed while purging (i.e. you can dalloc
400
+ * into one part of the hpdata while purging a different part).
401
+ */
402
+ bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
403
+ void **r_purge_addr, size_t *r_purge_size);
404
+ /*
405
+ * Updates the hpdata metadata after all purging is done. Needs external
406
+ * synchronization.
407
+ */
408
+ void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
409
+
410
+ void hpdata_hugify(hpdata_t *hpdata);
411
+ void hpdata_dehugify(hpdata_t *hpdata);
412
+
413
+ #endif /* JEMALLOC_INTERNAL_HPDATA_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/inspect.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_INSPECT_H
2
+ #define JEMALLOC_INTERNAL_INSPECT_H
3
+
4
+ /*
5
+ * This module contains the heap introspection capabilities. For now they are
6
+ * exposed purely through mallctl APIs in the experimental namespace, but this
7
+ * may change over time.
8
+ */
9
+
10
+ /*
11
+ * The following two structs are for experimental purposes. See
12
+ * experimental_utilization_query_ctl and
13
+ * experimental_utilization_batch_query_ctl in src/ctl.c.
14
+ */
15
+ typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
16
+ struct inspect_extent_util_stats_s {
17
+ size_t nfree;
18
+ size_t nregs;
19
+ size_t size;
20
+ };
21
+
22
+ typedef struct inspect_extent_util_stats_verbose_s
23
+ inspect_extent_util_stats_verbose_t;
24
+
25
+ struct inspect_extent_util_stats_verbose_s {
26
+ void *slabcur_addr;
27
+ size_t nfree;
28
+ size_t nregs;
29
+ size_t size;
30
+ size_t bin_nfree;
31
+ size_t bin_nregs;
32
+ };
33
+
34
+ void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
35
+ size_t *nfree, size_t *nregs, size_t *size);
36
+ void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
37
+ size_t *nfree, size_t *nregs, size_t *size,
38
+ size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
39
+
40
+ #endif /* JEMALLOC_INTERNAL_INSPECT_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_DECLS_H
2
+ #define JEMALLOC_INTERNAL_DECLS_H
3
+
4
+ #include <math.h>
5
+ #ifdef _WIN32
6
+ # include <windows.h>
7
+ # include "msvc_compat/windows_extra.h"
8
+ # include "msvc_compat/strings.h"
9
+ # ifdef _WIN64
10
+ # if LG_VADDR <= 32
11
+ # error Generate the headers using x64 vcargs
12
+ # endif
13
+ # else
14
+ # if LG_VADDR > 32
15
+ # undef LG_VADDR
16
+ # define LG_VADDR 32
17
+ # endif
18
+ # endif
19
+ #else
20
+ # include <sys/param.h>
21
+ # include <sys/mman.h>
22
+ # if !defined(__pnacl__) && !defined(__native_client__)
23
+ # include <sys/syscall.h>
24
+ # if !defined(SYS_write) && defined(__NR_write)
25
+ # define SYS_write __NR_write
26
+ # endif
27
+ # if defined(SYS_open) && defined(__aarch64__)
28
+ /* Android headers may define SYS_open to __NR_open even though
29
+ * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
30
+ # undef SYS_open
31
+ # endif
32
+ # include <sys/uio.h>
33
+ # endif
34
+ # include <pthread.h>
35
+ # if defined(__FreeBSD__) || defined(__DragonFly__)
36
+ # include <pthread_np.h>
37
+ # include <sched.h>
38
+ # if defined(__FreeBSD__)
39
+ # define cpu_set_t cpuset_t
40
+ # endif
41
+ # endif
42
+ # include <signal.h>
43
+ # ifdef JEMALLOC_OS_UNFAIR_LOCK
44
+ # include <os/lock.h>
45
+ # endif
46
+ # ifdef JEMALLOC_GLIBC_MALLOC_HOOK
47
+ # include <sched.h>
48
+ # endif
49
+ # include <errno.h>
50
+ # include <sys/time.h>
51
+ # include <time.h>
52
+ # ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
53
+ # include <mach/mach_time.h>
54
+ # endif
55
+ #endif
56
+ #include <sys/types.h>
57
+
58
+ #include <limits.h>
59
+ #ifndef SIZE_T_MAX
60
+ # define SIZE_T_MAX SIZE_MAX
61
+ #endif
62
+ #ifndef SSIZE_MAX
63
+ # define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
64
+ #endif
65
+ #include <stdarg.h>
66
+ #include <stdbool.h>
67
+ #include <stdio.h>
68
+ #include <stdlib.h>
69
+ #include <stdint.h>
70
+ #include <stddef.h>
71
+ #ifndef offsetof
72
+ # define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
73
+ #endif
74
+ #include <string.h>
75
+ #include <strings.h>
76
+ #include <ctype.h>
77
+ #ifdef _MSC_VER
78
+ # include <io.h>
79
+ typedef intptr_t ssize_t;
80
+ # define PATH_MAX 1024
81
+ # define STDERR_FILENO 2
82
+ # define __func__ __FUNCTION__
83
+ # ifdef JEMALLOC_HAS_RESTRICT
84
+ # define restrict __restrict
85
+ # endif
86
+ /* Disable warnings about deprecated system functions. */
87
+ # pragma warning(disable: 4996)
88
+ #if _MSC_VER < 1800
89
+ static int
90
+ isblank(int c) {
91
+ return (c == '\t' || c == ' ');
92
+ }
93
+ #endif
94
+ #else
95
+ # include <unistd.h>
96
+ #endif
97
+ #include <fcntl.h>
98
+
99
+ /*
100
+ * The Win32 midl compiler has #define small char; we don't use midl, but
101
+ * "small" is a nice identifier to have available when talking about size
102
+ * classes.
103
+ */
104
+ #ifdef small
105
+ # undef small
106
+ #endif
107
+
108
+ #endif /* JEMALLOC_INTERNAL_DECLS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_DEFS_H_
2
+ #define JEMALLOC_INTERNAL_DEFS_H_
3
+ /*
4
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
5
+ * public APIs to be prefixed. This makes it possible, with some care, to use
6
+ * multiple allocators simultaneously.
7
+ */
8
+ #undef JEMALLOC_PREFIX
9
+ #undef JEMALLOC_CPREFIX
10
+
11
+ /*
12
+ * Define overrides for non-standard allocator-related functions if they are
13
+ * present on the system.
14
+ */
15
+ #undef JEMALLOC_OVERRIDE___LIBC_CALLOC
16
+ #undef JEMALLOC_OVERRIDE___LIBC_FREE
17
+ #undef JEMALLOC_OVERRIDE___LIBC_MALLOC
18
+ #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
19
+ #undef JEMALLOC_OVERRIDE___LIBC_REALLOC
20
+ #undef JEMALLOC_OVERRIDE___LIBC_VALLOC
21
+ #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
22
+
23
+ /*
24
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
25
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
26
+ * from being exported, but for static libraries, naming collisions are a real
27
+ * possibility.
28
+ */
29
+ #undef JEMALLOC_PRIVATE_NAMESPACE
30
+
31
+ /*
32
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
33
+ * order to yield to another virtual CPU.
34
+ */
35
+ #undef CPU_SPINWAIT
36
+ /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
37
+ #undef HAVE_CPU_SPINWAIT
38
+
39
+ /*
40
+ * Number of significant bits in virtual addresses. This may be less than the
41
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
42
+ * bits are the same as bit 47.
43
+ */
44
+ #undef LG_VADDR
45
+
46
+ /* Defined if C11 atomics are available. */
47
+ #undef JEMALLOC_C11_ATOMICS
48
+
49
+ /* Defined if GCC __atomic atomics are available. */
50
+ #undef JEMALLOC_GCC_ATOMIC_ATOMICS
51
+ /* and the 8-bit variant support. */
52
+ #undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
53
+
54
+ /* Defined if GCC __sync atomics are available. */
55
+ #undef JEMALLOC_GCC_SYNC_ATOMICS
56
+ /* and the 8-bit variant support. */
57
+ #undef JEMALLOC_GCC_U8_SYNC_ATOMICS
58
+
59
+ /*
60
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
61
+ */
62
+ #undef JEMALLOC_HAVE_BUILTIN_CLZ
63
+
64
+ /*
65
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
66
+ */
67
+ #undef JEMALLOC_OS_UNFAIR_LOCK
68
+
69
+ /* Defined if syscall(2) is usable. */
70
+ #undef JEMALLOC_USE_SYSCALL
71
+
72
+ /*
73
+ * Defined if secure_getenv(3) is available.
74
+ */
75
+ #undef JEMALLOC_HAVE_SECURE_GETENV
76
+
77
+ /*
78
+ * Defined if issetugid(2) is available.
79
+ */
80
+ #undef JEMALLOC_HAVE_ISSETUGID
81
+
82
+ /* Defined if pthread_atfork(3) is available. */
83
+ #undef JEMALLOC_HAVE_PTHREAD_ATFORK
84
+
85
+ /* Defined if pthread_setname_np(3) is available. */
86
+ #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
87
+
88
+ /* Defined if pthread_getname_np(3) is available. */
89
+ #undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
90
+
91
+ /* Defined if pthread_get_name_np(3) is available. */
92
+ #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
93
+
94
+ /*
95
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
96
+ */
97
+ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
98
+
99
+ /*
100
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
101
+ */
102
+ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC
103
+
104
+ /*
105
+ * Defined if mach_absolute_time() is available.
106
+ */
107
+ #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
108
+
109
+ /*
110
+ * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
111
+ */
112
+ #undef JEMALLOC_HAVE_CLOCK_REALTIME
113
+
114
+ /*
115
+ * Defined if _malloc_thread_cleanup() exists. At least in the case of
116
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc
117
+ * bootstrapping will cause recursion into the pthreads library. Therefore, if
118
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
119
+ * malloc_tsd.
120
+ */
121
+ #undef JEMALLOC_MALLOC_THREAD_CLEANUP
122
+
123
+ /*
124
+ * Defined if threaded initialization is known to be safe on this platform.
125
+ * Among other things, it must be possible to initialize a mutex without
126
+ * triggering allocation in order for threaded allocation to be safe.
127
+ */
128
+ #undef JEMALLOC_THREADED_INIT
129
+
130
+ /*
131
+ * Defined if the pthreads implementation defines
132
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
133
+ * to avoid recursive allocation during mutex initialization.
134
+ */
135
+ #undef JEMALLOC_MUTEX_INIT_CB
136
+
137
+ /* Non-empty if the tls_model attribute is supported. */
138
+ #undef JEMALLOC_TLS_MODEL
139
+
140
+ /*
141
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
142
+ * inline functions.
143
+ */
144
+ #undef JEMALLOC_DEBUG
145
+
146
+ /* JEMALLOC_STATS enables statistics calculation. */
147
+ #undef JEMALLOC_STATS
148
+
149
+ /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
150
+ #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
151
+
152
+ /* JEMALLOC_PROF enables allocation profiling. */
153
+ #undef JEMALLOC_PROF
154
+
155
+ /* Use libunwind for profile backtracing if defined. */
156
+ #undef JEMALLOC_PROF_LIBUNWIND
157
+
158
+ /* Use libgcc for profile backtracing if defined. */
159
+ #undef JEMALLOC_PROF_LIBGCC
160
+
161
+ /* Use gcc intrinsics for profile backtracing if defined. */
162
+ #undef JEMALLOC_PROF_GCC
163
+
164
+ /*
165
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
166
+ * segment (DSS).
167
+ */
168
+ #undef JEMALLOC_DSS
169
+
170
+ /* Support memory filling (junk/zero). */
171
+ #undef JEMALLOC_FILL
172
+
173
+ /* Support utrace(2)-based tracing. */
174
+ #undef JEMALLOC_UTRACE
175
+
176
+ /* Support utrace(2)-based tracing (label based signature). */
177
+ #undef JEMALLOC_UTRACE_LABEL
178
+
179
+ /* Support optional abort() on OOM. */
180
+ #undef JEMALLOC_XMALLOC
181
+
182
+ /* Support lazy locking (avoid locking unless a second thread is launched). */
183
+ #undef JEMALLOC_LAZY_LOCK
184
+
185
+ /*
186
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
187
+ * classes).
188
+ */
189
+ #undef LG_QUANTUM
190
+
191
+ /* One page is 2^LG_PAGE bytes. */
192
+ #undef LG_PAGE
193
+
194
+ /* Maximum number of regions in a slab. */
195
+ #undef CONFIG_LG_SLAB_MAXREGS
196
+
197
+ /*
198
+ * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
199
+ * system does not explicitly support huge pages; system calls that require
200
+ * explicit huge page support are separately configured.
201
+ */
202
+ #undef LG_HUGEPAGE
203
+
204
+ /*
205
+ * If defined, adjacent virtual memory mappings with identical attributes
206
+ * automatically coalesce, and they fragment when changes are made to subranges.
207
+ * This is the normal order of things for mmap()/munmap(), but on Windows
208
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
209
+ * mappings do *not* coalesce/fragment.
210
+ */
211
+ #undef JEMALLOC_MAPS_COALESCE
212
+
213
+ /*
214
+ * If defined, retain memory for later reuse by default rather than using e.g.
215
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
216
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
217
+ * holes.
218
+ */
219
+ #undef JEMALLOC_RETAIN
220
+
221
+ /* TLS is used to map arenas and magazine caches to threads. */
222
+ #undef JEMALLOC_TLS
223
+
224
+ /*
225
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
226
+ * Don't use this directly; instead use unreachable() from util.h
227
+ */
228
+ #undef JEMALLOC_INTERNAL_UNREACHABLE
229
+
230
+ /*
231
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
232
+ * use ffs_*() from util.h.
233
+ */
234
+ #undef JEMALLOC_INTERNAL_FFSLL
235
+ #undef JEMALLOC_INTERNAL_FFSL
236
+ #undef JEMALLOC_INTERNAL_FFS
237
+
238
+ /*
239
+ * popcount*() functions to use for bitmapping.
240
+ */
241
+ #undef JEMALLOC_INTERNAL_POPCOUNTL
242
+ #undef JEMALLOC_INTERNAL_POPCOUNT
243
+
244
+ /*
245
+ * If defined, explicitly attempt to more uniformly distribute large allocation
246
+ * pointer alignments across all cache indices.
247
+ */
248
+ #undef JEMALLOC_CACHE_OBLIVIOUS
249
+
250
+ /*
251
+ * If defined, enable logging facilities. We make this a configure option to
252
+ * avoid taking extra branches everywhere.
253
+ */
254
+ #undef JEMALLOC_LOG
255
+
256
+ /*
257
+ * If defined, use readlinkat() (instead of readlink()) to follow
258
+ * /etc/malloc_conf.
259
+ */
260
+ #undef JEMALLOC_READLINKAT
261
+
262
+ /*
263
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
264
+ */
265
+ #undef JEMALLOC_ZONE
266
+
267
+ /*
268
+ * Methods for determining whether the OS overcommits.
269
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
270
+ * /proc/sys/vm.overcommit_memory file.
271
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
272
+ */
273
+ #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
274
+ #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
275
+
276
+ /* Defined if madvise(2) is available. */
277
+ #undef JEMALLOC_HAVE_MADVISE
278
+
279
+ /*
280
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
281
+ * arguments to madvise(2).
282
+ */
283
+ #undef JEMALLOC_HAVE_MADVISE_HUGE
284
+
285
+ /*
286
+ * Methods for purging unused pages differ between operating systems.
287
+ *
288
+ * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
289
+ * will be discarded rather than swapped out.
290
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
291
+ * defined, this immediately discards pages,
292
+ * such that new pages will be demand-zeroed if
293
+ * the address region is later touched;
294
+ * otherwise this behaves similarly to
295
+ * MADV_FREE, though typically with higher
296
+ * system overhead.
297
+ */
298
+ #undef JEMALLOC_PURGE_MADVISE_FREE
299
+ #undef JEMALLOC_PURGE_MADVISE_DONTNEED
300
+ #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
301
+
302
+ /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
303
+ #undef JEMALLOC_DEFINE_MADVISE_FREE
304
+
305
+ /*
306
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
307
+ */
308
+ #undef JEMALLOC_MADVISE_DONTDUMP
309
+
310
+ /*
311
+ * Defined if MADV_[NO]CORE is supported as an argument to madvise.
312
+ */
313
+ #undef JEMALLOC_MADVISE_NOCORE
314
+
315
+ /* Defined if mprotect(2) is available. */
316
+ #undef JEMALLOC_HAVE_MPROTECT
317
+
318
+ /*
319
+ * Defined if transparent huge pages (THPs) are supported via the
320
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
321
+ */
322
+ #undef JEMALLOC_THP
323
+
324
+ /* Defined if posix_madvise is available. */
325
+ #undef JEMALLOC_HAVE_POSIX_MADVISE
326
+
327
+ /*
328
+ * Method for purging unused pages using posix_madvise.
329
+ *
330
+ * posix_madvise(..., POSIX_MADV_DONTNEED)
331
+ */
332
+ #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
333
+ #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
334
+
335
+ /*
336
+ * Defined if memcntl page admin call is supported
337
+ */
338
+ #undef JEMALLOC_HAVE_MEMCNTL
339
+
340
+ /*
341
+ * Defined if malloc_size is supported
342
+ */
343
+ #undef JEMALLOC_HAVE_MALLOC_SIZE
344
+
345
+ /* Define if operating system has alloca.h header. */
346
+ #undef JEMALLOC_HAS_ALLOCA_H
347
+
348
+ /* C99 restrict keyword supported. */
349
+ #undef JEMALLOC_HAS_RESTRICT
350
+
351
+ /* For use by hash code. */
352
+ #undef JEMALLOC_BIG_ENDIAN
353
+
354
+ /* sizeof(int) == 2^LG_SIZEOF_INT. */
355
+ #undef LG_SIZEOF_INT
356
+
357
+ /* sizeof(long) == 2^LG_SIZEOF_LONG. */
358
+ #undef LG_SIZEOF_LONG
359
+
360
+ /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
361
+ #undef LG_SIZEOF_LONG_LONG
362
+
363
+ /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
364
+ #undef LG_SIZEOF_INTMAX_T
365
+
366
+ /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
367
+ #undef JEMALLOC_GLIBC_MALLOC_HOOK
368
+
369
+ /* glibc memalign hook. */
370
+ #undef JEMALLOC_GLIBC_MEMALIGN_HOOK
371
+
372
+ /* pthread support */
373
+ #undef JEMALLOC_HAVE_PTHREAD
374
+
375
+ /* dlsym() support */
376
+ #undef JEMALLOC_HAVE_DLSYM
377
+
378
+ /* Adaptive mutex support in pthreads. */
379
+ #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
380
+
381
+ /* GNU specific sched_getcpu support */
382
+ #undef JEMALLOC_HAVE_SCHED_GETCPU
383
+
384
+ /* GNU specific sched_setaffinity support */
385
+ #undef JEMALLOC_HAVE_SCHED_SETAFFINITY
386
+
387
+ /*
388
+ * If defined, all the features necessary for background threads are present.
389
+ */
390
+ #undef JEMALLOC_BACKGROUND_THREAD
391
+
392
+ /*
393
+ * If defined, jemalloc symbols are not exported (doesn't work when
394
+ * JEMALLOC_PREFIX is not defined).
395
+ */
396
+ #undef JEMALLOC_EXPORT
397
+
398
+ /* config.malloc_conf options string. */
399
+ #undef JEMALLOC_CONFIG_MALLOC_CONF
400
+
401
+ /* If defined, jemalloc takes the malloc/free/etc. symbol names. */
402
+ #undef JEMALLOC_IS_MALLOC
403
+
404
+ /*
405
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
406
+ */
407
+ #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
408
+
409
+ /* Performs additional safety checks when defined. */
410
+ #undef JEMALLOC_OPT_SAFETY_CHECKS
411
+
412
+ /* Is C++ support being built? */
413
+ #undef JEMALLOC_ENABLE_CXX
414
+
415
+ /* Performs additional size checks when defined. */
416
+ #undef JEMALLOC_OPT_SIZE_CHECKS
417
+
418
+ /* Allows sampled junk and stash for checking use-after-free when defined. */
419
+ #undef JEMALLOC_UAF_DETECTION
420
+
421
+ /* Darwin VM_MAKE_TAG support */
422
+ #undef JEMALLOC_HAVE_VM_MAKE_TAG
423
+
424
+ /* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
425
+ #undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
426
+
427
+ #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/sec_opts.h"
#include "jemalloc/internal/tsd_types.h"
#include "jemalloc/internal/nstime.h"

/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
extern bool opt_hpa;
extern hpa_shard_opts_t opt_hpa_opts;
extern sec_opts_t opt_hpa_sec_opts;

/* Junk filling options and the callbacks invoked on junked regions. */
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern void (*junk_free_callback)(void *ptr, size_t size);
extern void (*junk_alloc_callback)(void *ptr, size_t size);
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_experimental_infallible_new;
extern bool opt_zero;
extern unsigned opt_narenas;
extern zero_realloc_action_t opt_zero_realloc_action;
extern malloc_init_t malloc_init_state;
extern const char *zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;

/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
extern uintptr_t san_cache_bin_nonfast_mask;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/* Base index for manual arenas. */
extern unsigned manual_arena_base;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

/* Bootstrap allocators: usable before full malloc initialization. */
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
/* Arena registry and thread<->arena association management. */
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
/* fork(2) integration hooks (lock ordering around fork). */
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
void je_sdallocx_noflags(void *ptr, size_t size);
void *malloc_default(size_t size);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.
 *
 * Historically, we dealt with this by each header into four sections (types,
 * structs, externs, and inlines), and included each header file multiple times
 * in this file, picking out the portion we want on each pass using the
 * following #defines:
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 *
 * We're moving toward a world in which the dependencies are explicit; each file
 * will #include the headers it depends on (rather than relying on them being
 * implicitly available via this file including every header file in the
 * project).
 *
 * We're now in an intermediate state: we've broken up the header files to avoid
 * having to include each one multiple times, but have not yet moved the
 * dependency information into the header files (i.e. we still rely on the
 * ordering in this file to ensure all a header's dependencies are available in
 * its translation unit).  Each component is now broken up into multiple header
 * files, corresponding to the sections above (e.g. instead of "foo.h", we now
 * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
 *
 * Those files which have been converted to explicitly include their
 * inter-component dependencies are now in the initial HERMETIC HEADERS
 * section.  All headers may still rely on jemalloc_preamble.h (which, by fiat,
 * must be included first in every translation unit) for system headers and
 * global jemalloc definitions, however.
 */

/* NOTE: the include order in every section below is load-bearing. */

/******************************************************************************/
/* TYPES */
/******************************************************************************/

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"

/******************************************************************************/
/* STRUCTS */
/******************************************************************************/

#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"

/******************************************************************************/
/* EXTERNS */
/******************************************************************************/

#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"

/******************************************************************************/
/* INLINES */
/******************************************************************************/

#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
/*
 * Include portions of arena code interleaved with tcache code in order to
 * resolve circular dependencies.
 */
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines.h"
#include "jemalloc/internal/background_thread_inlines.h"

#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_INLINES_A_H
2
+ #define JEMALLOC_INTERNAL_INLINES_A_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/bit_util.h"
6
+ #include "jemalloc/internal/jemalloc_internal_types.h"
7
+ #include "jemalloc/internal/sc.h"
8
+ #include "jemalloc/internal/ticker.h"
9
+
10
/*
 * Return the id of the CPU the calling thread is currently running on.
 * Only meaningful when per-CPU arenas are compiled in; on platforms with
 * neither Win32 nor sched_getcpu() support this path must be unreachable.
 */
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(_WIN32)
	return GetCurrentProcessorNumber();
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}
22
+
23
/*
 * Return the chosen arena index based on current cpu.  In per_phycpu mode,
 * cpu ids in the upper half are assumed to be SMT siblings of the lower half
 * (this layout is assumed, not verified here).
 */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
	    2)) {
		/* One arena per logical cpu, or cpu is in the first half. */
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}
43
+
44
+ /* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
45
+ JEMALLOC_ALWAYS_INLINE unsigned
46
+ percpu_arena_ind_limit(percpu_arena_mode_t mode) {
47
+ assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
48
+ if (mode == per_phycpu_arena && ncpus > 1) {
49
+ if (ncpus % 2) {
50
+ /* This likely means a misconfig. */
51
+ return ncpus / 2 + 1;
52
+ }
53
+ return ncpus / 2;
54
+ } else {
55
+ return ncpus;
56
+ }
57
+ }
58
+
59
+ static inline arena_t *
60
+ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
61
+ arena_t *ret;
62
+
63
+ assert(ind < MALLOCX_ARENA_LIMIT);
64
+
65
+ ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
66
+ if (unlikely(ret == NULL)) {
67
+ if (init_if_missing) {
68
+ ret = arena_init(tsdn, ind, &arena_config_default);
69
+ }
70
+ }
71
+ return ret;
72
+ }
73
+
74
+ JEMALLOC_ALWAYS_INLINE bool
75
+ tcache_available(tsd_t *tsd) {
76
+ /*
77
+ * Thread specific auto tcache might be unavailable if: 1) during tcache
78
+ * initialization, or 2) disabled through thread.tcache.enabled mallctl
79
+ * or config options. This check covers all cases.
80
+ */
81
+ if (likely(tsd_tcache_enabled_get(tsd))) {
82
+ /* Associated arena == NULL implies tcache init in progress. */
83
+ if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
84
+ tcache_assert_initialized(tsd_tcachep_get(tsd));
85
+ }
86
+ return true;
87
+ }
88
+
89
+ return false;
90
+ }
91
+
92
+ JEMALLOC_ALWAYS_INLINE tcache_t *
93
+ tcache_get(tsd_t *tsd) {
94
+ if (!tcache_available(tsd)) {
95
+ return NULL;
96
+ }
97
+
98
+ return tsd_tcachep_get(tsd);
99
+ }
100
+
101
+ JEMALLOC_ALWAYS_INLINE tcache_slow_t *
102
+ tcache_slow_get(tsd_t *tsd) {
103
+ if (!tcache_available(tsd)) {
104
+ return NULL;
105
+ }
106
+
107
+ return tsd_tcache_slowp_get(tsd);
108
+ }
109
+
110
/*
 * Bump the thread's reentrancy level before calling out to code that may
 * itself allocate.  arena is the current context; reentry from a0 (arena 0)
 * is not allowed, since arena 0 is the designated fallback during reentrancy.
 */
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context.  Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
	tsd_pre_reentrancy_raw(tsd);
}

/* Undo a matching pre_reentrancy() call. */
static inline void
post_reentrancy(tsd_t *tsd) {
	tsd_post_reentrancy_raw(tsd);
}
121
+
122
+ #endif /* JEMALLOC_INTERNAL_INLINES_A_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_INLINES_B_H
2
+ #define JEMALLOC_INTERNAL_INLINES_B_H
3
+
4
+ #include "jemalloc/internal/extent.h"
5
+
6
/*
 * Rebind the calling thread to the arena matching cpu `cpu`, migrating its
 * arena association (and tcache association, if a tcache exists) when the
 * thread has moved to a different cpu.  No-op if already on the right arena.
 */
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	assert(have_percpu_arena);
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		/* init_if_missing=true: the percpu arena may be lazily created. */
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldarena, newarena);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
			    tcache, newarena);
		}
	}
}
28
+
29
+
30
/*
 * Choose an arena based on a per-thread value.  If `arena` is non-NULL it is
 * used as-is; otherwise the thread's cached (internal or application) arena is
 * returned, falling back to arena_choose_hard() on first use.  When per-CPU
 * arenas are enabled, the association may additionally be updated to track the
 * current cpu.  `internal` selects the metadata (iarena) association.
 */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
	arena_t *ret;

	if (arena != NULL) {
		return arena;
	}

	/* During reentrancy, arena 0 is the safest bet. */
	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
		return arena_get(tsd_tsdn(tsd), 0, true);
	}

	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL)) {
		/* First allocation on this thread: pick an arena for real. */
		ret = arena_choose_hard(tsd, internal);
		assert(ret);
		if (tcache_available(tsd)) {
			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
			tcache_t *tcache = tsd_tcachep_get(tsd);
			if (tcache_slow->arena != NULL) {
				/* See comments in tsd_tcache_data_init().*/
				assert(tcache_slow->arena ==
				    arena_get(tsd_tsdn(tsd), 0, false));
				if (tcache_slow->arena != ret) {
					tcache_arena_reassociate(tsd_tsdn(tsd),
					    tcache_slow, tcache, ret);
				}
			} else {
				tcache_arena_associate(tsd_tsdn(tsd),
				    tcache_slow, tcache, ret);
			}
		}
	}

	/*
	 * Note that for percpu arena, if the current arena is outside of the
	 * auto percpu arena range, (i.e. thread is assigned to a manually
	 * managed arena), then percpu arena is skipped.
	 */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
	    !internal && (arena_ind_get(ret) <
	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
	    tsd_tsdn(tsd))) {
		/* Thread may have moved cpus since last seen on this arena. */
		unsigned ind = percpu_arena_choose();
		if (arena_ind_get(ret) != ind) {
			percpu_arena_update(tsd, ind);
			ret = tsd_arena_get(tsd);
		}
		ret->last_thd = tsd_tsdn(tsd);
	}

	return ret;
}
85
+
86
/* Choose the application arena (or return `arena` if non-NULL). */
static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, false);
}

/* Choose the internal (metadata) arena (or return `arena` if non-NULL). */
static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, true);
}
95
+
96
+ static inline bool
97
+ arena_is_auto(arena_t *arena) {
98
+ assert(narenas_auto > 0);
99
+
100
+ return (arena_ind_get(arena) < manual_arena_base);
101
+ }
102
+
103
+ #endif /* JEMALLOC_INTERNAL_INLINES_B_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_INLINES_C_H
2
+ #define JEMALLOC_INTERNAL_INLINES_C_H
3
+
4
+ #include "jemalloc/internal/hook.h"
5
+ #include "jemalloc/internal/jemalloc_internal_types.h"
6
+ #include "jemalloc/internal/log.h"
7
+ #include "jemalloc/internal/sz.h"
8
+ #include "jemalloc/internal/thread_event.h"
9
+ #include "jemalloc/internal/witness.h"
10
+
11
+ /*
12
+ * Translating the names of the 'i' functions:
13
+ * Abbreviations used in the first part of the function name (before
14
+ * alloc/dalloc) describe what that function accomplishes:
15
+ * a: arena (query)
16
+ * s: size (query, or sized deallocation)
17
+ * e: extent (query)
18
+ * p: aligned (allocates)
19
+ * vs: size (query, without knowing that the pointer is into the heap)
20
+ * r: rallocx implementation
21
+ * x: xallocx implementation
22
+ * Abbreviations used in the second part of the function name (after
23
+ * alloc/dalloc) describe the arguments it takes
24
+ * z: whether to return zeroed memory
25
+ * t: accepts a tcache_t * parameter
26
+ * m: accepts an arena_t * parameter
27
+ */
28
+
29
/* Query the arena that owns the allocation at ptr (must be non-NULL). */
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_aalloc(tsdn, ptr);
}

/* Query the usable size of the allocation at ptr (must be non-NULL). */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_salloc(tsdn, ptr);
}
42
+
43
/*
 * Core parameterized allocation (zero/tcache/arena variants funnel here).
 * is_internal marks metadata allocations, which must not use a tcache and are
 * accounted separately in arena internal stats.
 */
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_internal, arena_t *arena, bool slow_path) {
	void *ret;

	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
		/* Only check lock ranks when not reentrant. */
		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
		    WITNESS_RANK_CORE, 0);
	}

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_internal && likely(ret != NULL)) {
		/* Account metadata bytes against the owning arena. */
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}
61
+
62
/* Convenience wrapper: allocate with the thread's tcache and auto arena. */
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
	    NULL, slow_path);
}
67
+
68
/*
 * Aligned allocation core.  usize must already be the padded size computed by
 * sz_sa2u(usize, alignment); the result is guaranteed to honor `alignment`.
 */
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_internal, arena_t *arena) {
	void *ret;

	assert(usize != 0);
	assert(usize == sz_sa2u(usize, alignment));
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_internal && likely(ret != NULL)) {
		/* Account metadata bytes against the owning arena. */
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}
87
+
88
/* Aligned allocation with explicit tcache/arena (non-internal). */
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena) {
	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}

/* Aligned allocation with the thread's tcache and auto arena. */
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd), false, NULL);
}
99
+
100
/*
 * Usable-size query that tolerates pointers not managed by jemalloc
 * (the "v" variant); delegates validation to arena_vsalloc().
 */
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
	return arena_vsalloc(tsdn, ptr);
}
104
+
105
/*
 * Core deallocation.  is_internal marks metadata frees (never tcached,
 * subtracted from arena internal stats).  alloc_ctx, when non-NULL, carries a
 * previously-looked-up size class to skip a redundant emap lookup.
 */
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
	assert(ptr != NULL);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	if (config_stats && is_internal) {
		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
	}
	if (!is_internal && !tsdn_null(tsdn) &&
	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
		/* Reentrant frees must bypass the tcache. */
		assert(tcache == NULL);
	}
	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}
122
+
123
/* Convenience wrapper: slow-path free via the thread's tcache. */
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}
127
+
128
/* Sized deallocation: caller supplies the allocation's size. */
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}
135
+
136
/*
 * Reallocation fallback used when the existing object's alignment is
 * inadequate: allocate a new aligned region, copy, fire the alloc/dalloc
 * hooks, then free the old region.  Returns NULL (old object intact) when the
 * padded size overflows or the new allocation fails.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    hook_ralloc_args_t *hook_args) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	void *p;
	size_t usize, copysize;

	usize = sz_sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		return NULL;
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	/* Fire hooks before freeing so both pointers are still valid. */
	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return p;
}
167
+
168
/*
 * is_realloc threads through the knowledge of whether or not this call comes
 * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
 * correct entry point into any hooks.
 * Note that these functions are all force-inlined, so no actual bool gets
 * passed-around anywhere.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
{
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
		    zero, tcache, arena, hook_args);
	}

	/* Alignment already satisfied; try to resize (possibly in place). */
	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
	    tcache, hook_args);
}
197
+
198
/* Convenience wrapper: reallocate via the thread's tcache and auto arena. */
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, hook_ralloc_args_t *hook_args) {
	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd), NULL, hook_args);
}
204
+
205
/*
 * In-place resize attempt (xallocx implementation).  Returns true on failure
 * (object unchanged); *newsize receives the resulting usable size either way.
 */
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, size_t *newsize) {
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		*newsize = oldsize;
		return true;
	}

	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
	    newsize);
}
223
+
224
/*
 * Bookkeeping tail of the malloc fastpath: commit the updated
 * thread_allocated counter, bump per-bin request stats, and log the result.
 */
JEMALLOC_ALWAYS_INLINE void
fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
    cache_bin_t *bin, void *ret) {
	thread_allocated_set(tsd, allocated_after);
	if (config_stats) {
		bin->tstats.nrequests++;
	}

	LOG("core.malloc.exit", "result: %p", ret);
}
234
+
235
+ JEMALLOC_ALWAYS_INLINE bool
236
+ malloc_initialized(void) {
237
+ return (malloc_init_state == malloc_init_initialized);
238
+ }
239
+
240
/*
 * malloc() fastpath.  Included here so that we can inline it into operator new;
 * function call overhead there is non-negligible as a fraction of total CPU in
 * allocation-heavy C++ programs.  We take the fallback alloc to allow malloc
 * (which can return NULL) to differ in its behavior from operator new (which
 * can't).  It matches the signature of malloc / operator new so that we can
 * tail-call the fallback allocator, allowing us to avoid setting up the call
 * frame in the common case.
 *
 * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
 * tcache.  If either of these is false, we tail-call to the slowpath,
 * malloc_default().  Tail-calling is used to avoid any caller-saved
 * registers.
 *
 * fastpath supports ticker and profiling, both of which will also
 * tail-call to the slowpath if they fire.
 */
JEMALLOC_ALWAYS_INLINE void *
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
	LOG("core.malloc.entry", "size: %zu", size);
	if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
		return fallback_alloc(size);
	}

	tsd_t *tsd = tsd_get(false);
	if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
		return fallback_alloc(size);
	}
	/*
	 * The code below till the branch checking the next_event threshold may
	 * execute before malloc_init(), in which case the threshold is 0 to
	 * trigger slow path and initialization.
	 *
	 * Note that when uninitialized, only the fast-path variants of the sz /
	 * tsd facilities may be called.
	 */
	szind_t ind;
	/*
	 * The thread_allocated counter in tsd serves as a general purpose
	 * accumulator for bytes of allocation to trigger different types of
	 * events.  usize is always needed to advance thread_allocated, though
	 * it's not always needed in the core allocation logic.
	 */
	size_t usize;
	sz_size2index_usize_fastpath(size, &ind, &usize);
	/* Fast path relies on size being a bin. */
	assert(ind < SC_NBINS);
	assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
	    (size <= SC_SMALL_MAXCLASS));

	uint64_t allocated, threshold;
	/* Loads thread_allocated and the next-event threshold from tsd. */
	te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
	uint64_t allocated_after = allocated + usize;
	/*
	 * The ind and usize might be uninitialized (or partially) before
	 * malloc_init().  The assertions check for: 1) full correctness (usize
	 * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
	 * when !initialized.
	 */
	if (!malloc_initialized()) {
		assert(threshold == 0);
	} else {
		assert(ind == sz_size2index(size));
		assert(usize > 0 && usize == sz_index2size(ind));
	}
	/*
	 * Check for events and tsd non-nominal (fast_threshold will be set to
	 * 0) in a single branch.
	 */
	if (unlikely(allocated_after >= threshold)) {
		return fallback_alloc(size);
	}
	assert(tsd_fast(tsd));

	tcache_t *tcache = tsd_tcachep_get(tsd);
	assert(tcache == tcache_get(tsd));
	cache_bin_t *bin = &tcache->bins[ind];
	bool tcache_success;
	void *ret;

	/*
	 * We split up the code this way so that redundant low-water
	 * computation doesn't happen on the (more common) case in which we
	 * don't touch the low water mark.  The compiler won't do this
	 * duplication on its own.
	 */
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	if (tcache_success) {
		fastpath_success_finish(tsd, allocated_after, bin, ret);
		return ret;
	}
	ret = cache_bin_alloc(bin, &tcache_success);
	if (tcache_success) {
		fastpath_success_finish(tsd, allocated_after, bin, ret);
		return ret;
	}

	/* Bin empty: let the slowpath refill the tcache (or fail). */
	return fallback_alloc(size);
}
339
+
340
/*
 * Redis active-defrag helper: return nonzero when the small allocation at ptr
 * lives in a slab that is less utilized than the average non-full slab of its
 * bin, i.e. when moving the allocation elsewhere would help defragmentation.
 * Large allocations (non-slab) always return 0.
 */
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
	int defrag = 0;
	emap_alloc_ctx_t alloc_ctx;
	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
		arena_t *arena = arena_get_from_edata(slab);
		szind_t binind = edata_szind_get(slab);
		unsigned binshard = edata_binshard_get(slab);
		bin_t *bin = arena_get_bin(arena, binind, binshard);
		/* Lock the owning shard while reading slab/bin stats. */
		malloc_mutex_lock(tsdn, &bin->lock);
		arena_dalloc_bin_locked_info_t info;
		arena_dalloc_bin_locked_begin(&info, binind);
		/* Don't bother moving allocations from the slab currently used for new allocations */
		if (slab != bin->slabcur) {
			int free_in_slab = edata_nfree_get(slab);
			if (free_in_slab) {
				const bin_info_t *bin_info = &bin_infos[binind];
				/* Find number of non-full slabs and the number of regs in them */
				unsigned long curslabs = 0;
				size_t curregs = 0;
				/* Run on all bin shards (usually just one) */
				for (uint32_t i=0; i< bin_info->n_shards; i++) {
					bin_t *bb = arena_get_bin(arena, binind, i);
					curslabs += bb->stats.nonfull_slabs;
					/* Deduct the regs in full slabs (they're not part of the game) */
					unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs;
					curregs += bb->stats.curregs - full_slabs * bin_info->nregs;
					if (bb->slabcur) {
						/* Remove slabcur from the overall utilization (not a candidate to move from) */
						curregs -= bin_info->nregs - edata_nfree_get(bb->slabcur);
						curslabs -= 1;
					}
				}
				/* Compare the utilization ratio of the slab in question to the total average
				 * among non-full slabs. To avoid precision loss in division, we do that by
				 * extrapolating the usage of the slab as if all slabs have the same usage.
				 * If this slab is less used than the average, we'll prefer to move the data
				 * to hopefully more used ones. To avoid stagnation when all slabs have the same
				 * utilization, we give additional 12.5% weight to the decision to defrag. */
				defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs + curregs / 8;
			}
		}
		arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
	return defrag;
}
390
+
391
+ #endif /* JEMALLOC_INTERNAL_INLINES_C_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef JEMALLOC_INTERNAL_MACROS_H
#define JEMALLOC_INTERNAL_MACROS_H

/*
 * Inlining / attribute helpers.  In debug builds we deliberately avoid
 * forced inlining so that stack traces stay readable.
 */
#ifdef JEMALLOC_DEBUG
#  define JEMALLOC_ALWAYS_INLINE static inline
#else
#  ifdef _MSC_VER
#    define JEMALLOC_ALWAYS_INLINE static __forceinline
#  else
#    define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#  endif
#endif
#ifdef _MSC_VER
#  define inline _inline
#endif

#define UNUSED JEMALLOC_ATTR(unused)

/* Cast-to-fixed-width helpers for integer constants. */
#define ZU(z)	((size_t)z)
#define ZD(z)	((ssize_t)z)
#define QU(q)	((uint64_t)q)
#define QD(q)	((int64_t)q)

/*
 * Suffixed-literal ("K" = konstant) variants.  NB: KQD previously expanded to
 * the undefined macro QI, which broke any use of KQD; it must route through
 * QD (the signed 64-bit cast above).
 */
#define KZU(z)	ZU(z##ULL)
#define KZD(z)	ZD(z##LL)
#define KQU(q)	QU(q##ULL)
#define KQD(q)	QD(q##LL)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
#  define restrict
#endif

/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
#  define JET_MUTABLE
#else
#  define JET_MUTABLE const
#endif

#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__

/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
#  define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
#  define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/* #pragma GCC diagnostic first appeared in gcc 4.6. */
#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
    (__GNUC_MINOR__ > 5)))) || defined(__clang__)
/*
 * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
 * diagnostic suppression macros and should not be used anywhere else.
 */
#  define JEMALLOC_PRAGMA__(X) _Pragma(#X)
#  define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
#  define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
     JEMALLOC_PRAGMA__(GCC diagnostic ignored W)

/*
 * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
 * all clang versions up to version 7 (currently trunk, unreleased).  This macro
 * suppresses the warning for the affected compiler versions only.
 */
#  if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
     defined(__clang__)
#    define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
       JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
#  else
#    define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#  endif

#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
     JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
#  define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
     JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
#  if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
       JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
#  else
#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
#  endif
#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
     JEMALLOC_DIAGNOSTIC_PUSH \
     JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
#else
#  define JEMALLOC_DIAGNOSTIC_PUSH
#  define JEMALLOC_DIAGNOSTIC_POP
#  define JEMALLOC_DIAGNOSTIC_IGNORE(W)
#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
#endif

/*
 * Disables spurious diagnostics for all headers.  Since these headers are not
 * included by users directly, it does not affect their diagnostic settings.
 */
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

#endif /* JEMALLOC_INTERNAL_MACROS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_TYPES_H
2
+ #define JEMALLOC_INTERNAL_TYPES_H
3
+
4
+ #include "jemalloc/internal/quantum.h"
5
+
6
+ /* Processor / core id type. */
7
+ typedef int malloc_cpuid_t;
8
+
9
+ /* When realloc(non-null-ptr, 0) is called, what happens? */
10
+ enum zero_realloc_action_e {
11
+ /* Realloc(ptr, 0) is free(ptr); return malloc(0); */
12
+ zero_realloc_action_alloc = 0,
13
+ /* Realloc(ptr, 0) is free(ptr); */
14
+ zero_realloc_action_free = 1,
15
+ /* Realloc(ptr, 0) aborts. */
16
+ zero_realloc_action_abort = 2
17
+ };
18
+ typedef enum zero_realloc_action_e zero_realloc_action_t;
19
+
20
+ /* Signature of write callback. */
21
+ typedef void (write_cb_t)(void *, const char *);
22
+
23
+ enum malloc_init_e {
24
+ malloc_init_uninitialized = 3,
25
+ malloc_init_a0_initialized = 2,
26
+ malloc_init_recursible = 1,
27
+ malloc_init_initialized = 0 /* Common case --> jnz. */
28
+ };
29
+ typedef enum malloc_init_e malloc_init_t;
30
+
31
+ /*
32
+ * Flags bits:
33
+ *
34
+ * a: arena
35
+ * t: tcache
36
+ * 0: unused
37
+ * z: zero
38
+ * n: alignment
39
+ *
40
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
41
+ */
42
+ #define MALLOCX_ARENA_BITS 12
43
+ #define MALLOCX_TCACHE_BITS 12
44
+ #define MALLOCX_LG_ALIGN_BITS 6
45
+ #define MALLOCX_ARENA_SHIFT 20
46
+ #define MALLOCX_TCACHE_SHIFT 8
47
+ #define MALLOCX_ARENA_MASK \
48
+ (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
49
+ /* NB: Arena index bias decreases the maximum number of arenas by 1. */
50
+ #define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
51
+ #define MALLOCX_TCACHE_MASK \
52
+ (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
53
+ #define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
54
+ #define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
55
+ /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
56
+ #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
57
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
58
+ #define MALLOCX_ALIGN_GET(flags) \
59
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
60
+ #define MALLOCX_ZERO_GET(flags) \
61
+ ((bool)(flags & MALLOCX_ZERO))
62
+
63
+ #define MALLOCX_TCACHE_GET(flags) \
64
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
65
+ #define MALLOCX_ARENA_GET(flags) \
66
+ (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
67
+
68
+ /* Smallest size class to support. */
69
+ #define TINY_MIN (1U << LG_TINY_MIN)
70
+
71
+ #define LONG ((size_t)(1U << LG_SIZEOF_LONG))
72
+ #define LONG_MASK (LONG - 1)
73
+
74
+ /* Return the smallest long multiple that is >= a. */
75
+ #define LONG_CEILING(a) \
76
+ (((a) + LONG_MASK) & ~LONG_MASK)
77
+
78
+ #define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
79
+ #define PTR_MASK (SIZEOF_PTR - 1)
80
+
81
+ /* Return the smallest (void *) multiple that is >= a. */
82
+ #define PTR_CEILING(a) \
83
+ (((a) + PTR_MASK) & ~PTR_MASK)
84
+
85
+ /*
86
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
87
+ * In addition, this controls the spacing of cacheline-spaced size classes.
88
+ *
89
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
90
+ * only handle raw constants.
91
+ */
92
+ #define LG_CACHELINE 6
93
+ #define CACHELINE 64
94
+ #define CACHELINE_MASK (CACHELINE - 1)
95
+
96
+ /* Return the smallest cacheline multiple that is >= s. */
97
+ #define CACHELINE_CEILING(s) \
98
+ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
99
+
100
+ /* Return the nearest aligned address at or below a. */
101
+ #define ALIGNMENT_ADDR2BASE(a, alignment) \
102
+ ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
103
+
104
+ /* Return the offset between a and the nearest aligned address at or below a. */
105
+ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \
106
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
107
+
108
+ /* Return the smallest alignment multiple that is >= s. */
109
+ #define ALIGNMENT_CEILING(s, alignment) \
110
+ (((s) + (alignment - 1)) & ((~(alignment)) + 1))
111
+
112
+ /* Declare a variable-length array. */
113
+ #if __STDC_VERSION__ < 199901L
114
+ # ifdef _MSC_VER
115
+ # include <malloc.h>
116
+ # define alloca _alloca
117
+ # else
118
+ # ifdef JEMALLOC_HAS_ALLOCA_H
119
+ # include <alloca.h>
120
+ # else
121
+ # include <stdlib.h>
122
+ # endif
123
+ # endif
124
+ # define VARIABLE_ARRAY(type, name, count) \
125
+ type *name = alloca(sizeof(type) * (count))
126
+ #else
127
+ # define VARIABLE_ARRAY(type, name, count) type name[(count)]
128
+ #endif
129
+
130
+ #endif /* JEMALLOC_INTERNAL_TYPES_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_PREAMBLE_H
2
+ #define JEMALLOC_PREAMBLE_H
3
+
4
+ #include "jemalloc_internal_defs.h"
5
+ #include "jemalloc/internal/jemalloc_internal_decls.h"
6
+
7
+ #if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
8
+ #include <sys/ktrace.h>
9
+ # if defined(JEMALLOC_UTRACE)
10
+ # define UTRACE_CALL(p, l) utrace(p, l)
11
+ # else
12
+ # define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
13
+ # define JEMALLOC_UTRACE
14
+ # endif
15
+ #endif
16
+
17
+ #define JEMALLOC_NO_DEMANGLE
18
+ #ifdef JEMALLOC_JET
19
+ # undef JEMALLOC_IS_MALLOC
20
+ # define JEMALLOC_N(n) jet_##n
21
+ # include "jemalloc/internal/public_namespace.h"
22
+ # define JEMALLOC_NO_RENAME
23
+ # include "../jemalloc@install_suffix@.h"
24
+ # undef JEMALLOC_NO_RENAME
25
+ #else
26
+ # define JEMALLOC_N(n) @private_namespace@##n
27
+ # include "../jemalloc@install_suffix@.h"
28
+ #endif
29
+
30
+ #if defined(JEMALLOC_OSATOMIC)
31
+ #include <libkern/OSAtomic.h>
32
+ #endif
33
+
34
+ #ifdef JEMALLOC_ZONE
35
+ #include <mach/mach_error.h>
36
+ #include <mach/mach_init.h>
37
+ #include <mach/vm_map.h>
38
+ #endif
39
+
40
+ #include "jemalloc/internal/jemalloc_internal_macros.h"
41
+
42
+ /*
43
+ * Note that the ordering matters here; the hook itself is name-mangled. We
44
+ * want the inclusion of hooks to happen early, so that we hook as much as
45
+ * possible.
46
+ */
47
+ #ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
48
+ # ifndef JEMALLOC_JET
49
+ # include "jemalloc/internal/private_namespace.h"
50
+ # else
51
+ # include "jemalloc/internal/private_namespace_jet.h"
52
+ # endif
53
+ #endif
54
+ #include "jemalloc/internal/test_hooks.h"
55
+
56
+ #ifdef JEMALLOC_DEFINE_MADVISE_FREE
57
+ # define JEMALLOC_MADV_FREE 8
58
+ #endif
59
+
60
+ static const bool config_debug =
61
+ #ifdef JEMALLOC_DEBUG
62
+ true
63
+ #else
64
+ false
65
+ #endif
66
+ ;
67
+ static const bool have_dss =
68
+ #ifdef JEMALLOC_DSS
69
+ true
70
+ #else
71
+ false
72
+ #endif
73
+ ;
74
+ static const bool have_madvise_huge =
75
+ #ifdef JEMALLOC_HAVE_MADVISE_HUGE
76
+ true
77
+ #else
78
+ false
79
+ #endif
80
+ ;
81
+ static const bool config_fill =
82
+ #ifdef JEMALLOC_FILL
83
+ true
84
+ #else
85
+ false
86
+ #endif
87
+ ;
88
+ static const bool config_lazy_lock =
89
+ #ifdef JEMALLOC_LAZY_LOCK
90
+ true
91
+ #else
92
+ false
93
+ #endif
94
+ ;
95
+ static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
96
+ static const bool config_prof =
97
+ #ifdef JEMALLOC_PROF
98
+ true
99
+ #else
100
+ false
101
+ #endif
102
+ ;
103
+ static const bool config_prof_libgcc =
104
+ #ifdef JEMALLOC_PROF_LIBGCC
105
+ true
106
+ #else
107
+ false
108
+ #endif
109
+ ;
110
+ static const bool config_prof_libunwind =
111
+ #ifdef JEMALLOC_PROF_LIBUNWIND
112
+ true
113
+ #else
114
+ false
115
+ #endif
116
+ ;
117
+ static const bool maps_coalesce =
118
+ #ifdef JEMALLOC_MAPS_COALESCE
119
+ true
120
+ #else
121
+ false
122
+ #endif
123
+ ;
124
+ static const bool config_stats =
125
+ #ifdef JEMALLOC_STATS
126
+ true
127
+ #else
128
+ false
129
+ #endif
130
+ ;
131
+ static const bool config_tls =
132
+ #ifdef JEMALLOC_TLS
133
+ true
134
+ #else
135
+ false
136
+ #endif
137
+ ;
138
+ static const bool config_utrace =
139
+ #ifdef JEMALLOC_UTRACE
140
+ true
141
+ #else
142
+ false
143
+ #endif
144
+ ;
145
+ static const bool config_xmalloc =
146
+ #ifdef JEMALLOC_XMALLOC
147
+ true
148
+ #else
149
+ false
150
+ #endif
151
+ ;
152
+ static const bool config_cache_oblivious =
153
+ #ifdef JEMALLOC_CACHE_OBLIVIOUS
154
+ true
155
+ #else
156
+ false
157
+ #endif
158
+ ;
159
+ /*
160
+ * Undocumented, for jemalloc development use only at the moment. See the note
161
+ * in jemalloc/internal/log.h.
162
+ */
163
+ static const bool config_log =
164
+ #ifdef JEMALLOC_LOG
165
+ true
166
+ #else
167
+ false
168
+ #endif
169
+ ;
170
+ /*
171
+ * Are extra safety checks enabled; things like checking the size of sized
172
+ * deallocations, double-frees, etc.
173
+ */
174
+ static const bool config_opt_safety_checks =
175
+ #ifdef JEMALLOC_OPT_SAFETY_CHECKS
176
+ true
177
+ #elif defined(JEMALLOC_DEBUG)
178
+ /*
179
+ * This lets us only guard safety checks by one flag instead of two; fast
180
+ * checks can guard solely by config_opt_safety_checks and run in debug mode
181
+ * too.
182
+ */
183
+ true
184
+ #else
185
+ false
186
+ #endif
187
+ ;
188
+
189
+ /*
190
+ * Extra debugging of sized deallocations too onerous to be included in the
191
+ * general safety checks.
192
+ */
193
+ static const bool config_opt_size_checks =
194
+ #if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
195
+ true
196
+ #else
197
+ false
198
+ #endif
199
+ ;
200
+
201
+ static const bool config_uaf_detection =
202
+ #if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
203
+ true
204
+ #else
205
+ false
206
+ #endif
207
+ ;
208
+
209
+ /* Whether or not the C++ extensions are enabled. */
210
+ static const bool config_enable_cxx =
211
+ #ifdef JEMALLOC_ENABLE_CXX
212
+ true
213
+ #else
214
+ false
215
+ #endif
216
+ ;
217
+
218
+ #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
219
+ /* Currently percpu_arena depends on sched_getcpu. */
220
+ #define JEMALLOC_PERCPU_ARENA
221
+ #endif
222
+ static const bool have_percpu_arena =
223
+ #ifdef JEMALLOC_PERCPU_ARENA
224
+ true
225
+ #else
226
+ false
227
+ #endif
228
+ ;
229
+ /*
230
+ * Undocumented, and not recommended; the application should take full
231
+ * responsibility for tracking provenance.
232
+ */
233
+ static const bool force_ivsalloc =
234
+ #ifdef JEMALLOC_FORCE_IVSALLOC
235
+ true
236
+ #else
237
+ false
238
+ #endif
239
+ ;
240
+ static const bool have_background_thread =
241
+ #ifdef JEMALLOC_BACKGROUND_THREAD
242
+ true
243
+ #else
244
+ false
245
+ #endif
246
+ ;
247
+ static const bool config_high_res_timer =
248
+ #ifdef JEMALLOC_HAVE_CLOCK_REALTIME
249
+ true
250
+ #else
251
+ false
252
+ #endif
253
+ ;
254
+
255
+ static const bool have_memcntl =
256
+ #ifdef JEMALLOC_HAVE_MEMCNTL
257
+ true
258
+ #else
259
+ false
260
+ #endif
261
+ ;
262
+
263
+ #endif /* JEMALLOC_PREAMBLE_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/large_externs.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
2
+ #define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
3
+
4
+ #include "jemalloc/internal/hook.h"
5
+
6
+ void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
7
+ void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
8
+ bool zero);
9
+ bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
10
+ size_t usize_max, bool zero);
11
+ void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
12
+ size_t alignment, bool zero, tcache_t *tcache,
13
+ hook_ralloc_args_t *hook_args);
14
+
15
+ void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
16
+ void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
17
+ void large_dalloc(tsdn_t *tsdn, edata_t *edata);
18
+ size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
19
+ void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
20
+ bool reset_recent);
21
+ void large_prof_tctx_reset(edata_t *edata);
22
+ void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
23
+
24
+ #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/lockedint.h ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
2
+ #define JEMALLOC_INTERNAL_LOCKEDINT_H
3
+
4
+ /*
5
+ * In those architectures that support 64-bit atomics, we use atomic updates for
6
+ * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
7
+ * externally.
8
+ */
9
+
10
+ typedef struct locked_u64_s locked_u64_t;
11
+ #ifdef JEMALLOC_ATOMIC_U64
12
+ struct locked_u64_s {
13
+ atomic_u64_t val;
14
+ };
15
+ #else
16
+ /* Must hold the associated mutex. */
17
+ struct locked_u64_s {
18
+ uint64_t val;
19
+ };
20
+ #endif
21
+
22
+ typedef struct locked_zu_s locked_zu_t;
23
+ struct locked_zu_s {
24
+ atomic_zu_t val;
25
+ };
26
+
27
+ #ifndef JEMALLOC_ATOMIC_U64
28
+ # define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
29
+ # define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
30
+ malloc_mutex_init(&(mu), name, rank, rank_mode)
31
+ # define LOCKEDINT_MTX(mtx) (&(mtx))
32
+ # define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
33
+ # define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
34
+ # define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
35
+ # define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
36
+ malloc_mutex_postfork_parent(tsdn, &(mu))
37
+ # define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
38
+ malloc_mutex_postfork_child(tsdn, &(mu))
39
+ #else
40
+ # define LOCKEDINT_MTX_DECLARE(name)
41
+ # define LOCKEDINT_MTX(mtx) NULL
42
+ # define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
43
+ # define LOCKEDINT_MTX_LOCK(tsdn, mu)
44
+ # define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
45
+ # define LOCKEDINT_MTX_PREFORK(tsdn, mu)
46
+ # define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
47
+ # define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
48
+ #endif
49
+
50
+ #ifdef JEMALLOC_ATOMIC_U64
51
+ # define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
52
+ #else
53
+ # define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
54
+ malloc_mutex_assert_owner(tsdn, (mtx))
55
+ #endif
56
+
57
+ static inline uint64_t
58
+ locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
59
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
60
+ #ifdef JEMALLOC_ATOMIC_U64
61
+ return atomic_load_u64(&p->val, ATOMIC_RELAXED);
62
+ #else
63
+ return p->val;
64
+ #endif
65
+ }
66
+
67
+ static inline void
68
+ locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
69
+ uint64_t x) {
70
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
71
+ #ifdef JEMALLOC_ATOMIC_U64
72
+ atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
73
+ #else
74
+ p->val += x;
75
+ #endif
76
+ }
77
+
78
+ static inline void
79
+ locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
80
+ uint64_t x) {
81
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
82
+ #ifdef JEMALLOC_ATOMIC_U64
83
+ uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
84
+ assert(r - x <= r);
85
+ #else
86
+ p->val -= x;
87
+ assert(p->val + x >= p->val);
88
+ #endif
89
+ }
90
+
91
+ /* Increment and take modulus. Returns whether the modulo made any change. */
92
+ static inline bool
93
+ locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
94
+ const uint64_t x, const uint64_t modulus) {
95
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
96
+ uint64_t before, after;
97
+ bool overflow;
98
+ #ifdef JEMALLOC_ATOMIC_U64
99
+ before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
100
+ do {
101
+ after = before + x;
102
+ assert(after >= before);
103
+ overflow = (after >= modulus);
104
+ if (overflow) {
105
+ after %= modulus;
106
+ }
107
+ } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
108
+ ATOMIC_RELAXED, ATOMIC_RELAXED));
109
+ #else
110
+ before = p->val;
111
+ after = before + x;
112
+ overflow = (after >= modulus);
113
+ if (overflow) {
114
+ after %= modulus;
115
+ }
116
+ p->val = after;
117
+ #endif
118
+ return overflow;
119
+ }
120
+
121
+ /*
122
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
123
+ * This lets us avoid the cost of a fetch_add when its unnecessary (note that
124
+ * the types here are atomic).
125
+ */
126
+ static inline void
127
+ locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
128
+ #ifdef JEMALLOC_ATOMIC_U64
129
+ uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
130
+ atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
131
+ #else
132
+ dst->val += src;
133
+ #endif
134
+ }
135
+
136
+ static inline uint64_t
137
+ locked_read_u64_unsynchronized(locked_u64_t *p) {
138
+ #ifdef JEMALLOC_ATOMIC_U64
139
+ return atomic_load_u64(&p->val, ATOMIC_RELAXED);
140
+ #else
141
+ return p->val;
142
+ #endif
143
+ }
144
+
145
+ static inline void
146
+ locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
147
+ #ifdef JEMALLOC_ATOMIC_U64
148
+ atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
149
+ #else
150
+ p->val = x;
151
+ #endif
152
+ }
153
+
154
+ static inline size_t
155
+ locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
156
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
157
+ #ifdef JEMALLOC_ATOMIC_U64
158
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
159
+ #else
160
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
161
+ #endif
162
+ }
163
+
164
+ static inline void
165
+ locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
166
+ size_t x) {
167
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
168
+ #ifdef JEMALLOC_ATOMIC_U64
169
+ atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
170
+ #else
171
+ size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
172
+ atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
173
+ #endif
174
+ }
175
+
176
+ static inline void
177
+ locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
178
+ size_t x) {
179
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
180
+ #ifdef JEMALLOC_ATOMIC_U64
181
+ size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
182
+ assert(r - x <= r);
183
+ #else
184
+ size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
185
+ atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
186
+ #endif
187
+ }
188
+
189
+ /* Like the _u64 variant, needs an externally synchronized *dst. */
190
+ static inline void
191
+ locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
192
+ size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
193
+ atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
194
+ }
195
+
196
+ /*
197
+ * Unlike the _u64 variant, this is safe to call unconditionally.
198
+ */
199
+ static inline size_t
200
+ locked_read_atomic_zu(locked_zu_t *p) {
201
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
202
+ }
203
+
204
+ #endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/log.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_LOG_H
2
+ #define JEMALLOC_INTERNAL_LOG_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/malloc_io.h"
6
+ #include "jemalloc/internal/mutex.h"
7
+
8
+ #ifdef JEMALLOC_LOG
9
+ # define JEMALLOC_LOG_VAR_BUFSIZE 1000
10
+ #else
11
+ # define JEMALLOC_LOG_VAR_BUFSIZE 1
12
+ #endif
13
+
14
+ #define JEMALLOC_LOG_BUFSIZE 4096
15
+
16
+ /*
17
+ * The log malloc_conf option is a '|'-delimited list of log_var name segments
18
+ * which should be logged. The names are themselves hierarchical, with '.' as
19
+ * the delimiter (a "segment" is just a prefix in the log namespace). So, if
20
+ * you have:
21
+ *
22
+ * log("arena", "log msg for arena"); // 1
23
+ * log("arena.a", "log msg for arena.a"); // 2
24
+ * log("arena.b", "log msg for arena.b"); // 3
25
+ * log("arena.a.a", "log msg for arena.a.a"); // 4
26
+ * log("extent.a", "log msg for extent.a"); // 5
27
+ * log("extent.b", "log msg for extent.b"); // 6
28
+ *
29
+ * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
30
+ * 6 will print at runtime. You can enable logging from all log vars by
31
+ * writing "log=.".
32
+ *
33
+ * None of this should be regarded as a stable API for right now. It's intended
34
+ * as a debugging interface, to let us keep around some of our printf-debugging
35
+ * statements.
36
+ */
37
+
38
+ extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
39
+ extern atomic_b_t log_init_done;
40
+
41
+ typedef struct log_var_s log_var_t;
42
+ struct log_var_s {
43
+ /*
44
+ * Lowest bit is "inited", second lowest is "enabled". Putting them in
45
+ * a single word lets us avoid any fences on weak architectures.
46
+ */
47
+ atomic_u_t state;
48
+ const char *name;
49
+ };
50
+
51
+ #define LOG_NOT_INITIALIZED 0U
52
+ #define LOG_INITIALIZED_NOT_ENABLED 1U
53
+ #define LOG_ENABLED 2U
54
+
55
+ #define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
56
+
57
+ /*
58
+ * Returns the value we should assume for state (which is not necessarily
59
+ * accurate; if logging is done before logging has finished initializing, then
60
+ * we default to doing the safe thing by logging everything).
61
+ */
62
+ unsigned log_var_update_state(log_var_t *log_var);
63
+
64
+ /* We factor out the metadata management to allow us to test more easily. */
65
+ #define log_do_begin(log_var) \
66
+ if (config_log) { \
67
+ unsigned log_state = atomic_load_u(&(log_var).state, \
68
+ ATOMIC_RELAXED); \
69
+ if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
70
+ log_state = log_var_update_state(&(log_var)); \
71
+ assert(log_state != LOG_NOT_INITIALIZED); \
72
+ } \
73
+ if (log_state == LOG_ENABLED) { \
74
+ {
75
+ /* User code executes here. */
76
+ #define log_do_end(log_var) \
77
+ } \
78
+ } \
79
+ }
80
+
81
+ /*
82
+ * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
83
+ * preprocessing. To work around this, we take all potential extra arguments in
84
+ * a var-args functions. Since a varargs macro needs at least one argument in
85
+ * the "...", we accept the format string there, and require that the first
86
+ * argument in this "..." is a const char *.
87
+ */
88
+ static inline void
89
+ log_impl_varargs(const char *name, ...) {
90
+ char buf[JEMALLOC_LOG_BUFSIZE];
91
+ va_list ap;
92
+
93
+ va_start(ap, name);
94
+ const char *format = va_arg(ap, const char *);
95
+ size_t dst_offset = 0;
96
+ dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
97
+ dst_offset += malloc_vsnprintf(buf + dst_offset,
98
+ JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
99
+ dst_offset += malloc_snprintf(buf + dst_offset,
100
+ JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
101
+ va_end(ap);
102
+
103
+ malloc_write(buf);
104
+ }
105
+
106
+ /* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
107
+ #define LOG(log_var_str, ...) \
108
+ do { \
109
+ static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
110
+ log_do_begin(log_var) \
111
+ log_impl_varargs((log_var).name, __VA_ARGS__); \
112
+ log_do_end(log_var) \
113
+ } while (0)
114
+
115
+ #endif /* JEMALLOC_INTERNAL_LOG_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/malloc_io.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
2
+ #define JEMALLOC_INTERNAL_MALLOC_IO_H
3
+
4
+ #include "jemalloc/internal/jemalloc_internal_types.h"
5
+
6
+ #ifdef _WIN32
7
+ # ifdef _WIN64
8
+ # define FMT64_PREFIX "ll"
9
+ # define FMTPTR_PREFIX "ll"
10
+ # else
11
+ # define FMT64_PREFIX "ll"
12
+ # define FMTPTR_PREFIX ""
13
+ # endif
14
+ # define FMTd32 "d"
15
+ # define FMTu32 "u"
16
+ # define FMTx32 "x"
17
+ # define FMTd64 FMT64_PREFIX "d"
18
+ # define FMTu64 FMT64_PREFIX "u"
19
+ # define FMTx64 FMT64_PREFIX "x"
20
+ # define FMTdPTR FMTPTR_PREFIX "d"
21
+ # define FMTuPTR FMTPTR_PREFIX "u"
22
+ # define FMTxPTR FMTPTR_PREFIX "x"
23
+ #else
24
+ # include <inttypes.h>
25
+ # define FMTd32 PRId32
26
+ # define FMTu32 PRIu32
27
+ # define FMTx32 PRIx32
28
+ # define FMTd64 PRId64
29
+ # define FMTu64 PRIu64
30
+ # define FMTx64 PRIx64
31
+ # define FMTdPTR PRIdPTR
32
+ # define FMTuPTR PRIuPTR
33
+ # define FMTxPTR PRIxPTR
34
+ #endif
35
+
36
+ /* Size of stack-allocated buffer passed to buferror(). */
37
+ #define BUFERROR_BUF 64
38
+
39
+ /*
40
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
41
+ * large enough for all possible uses within jemalloc.
42
+ */
43
+ #define MALLOC_PRINTF_BUFSIZE 4096
44
+
45
+ write_cb_t wrtmessage;
46
+ int buferror(int err, char *buf, size_t buflen);
47
+ uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
48
+ int base);
49
+ void malloc_write(const char *s);
50
+
51
+ /*
52
+ * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
53
+ * point math.
54
+ */
55
+ size_t malloc_vsnprintf(char *str, size_t size, const char *format,
56
+ va_list ap);
57
+ size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
58
+ JEMALLOC_FORMAT_PRINTF(3, 4);
59
+ /*
60
+ * The caller can set write_cb to null to choose to print with the
61
+ * je_malloc_message hook.
62
+ */
63
+ void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
64
+ va_list ap);
65
+ void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
66
+ ...) JEMALLOC_FORMAT_PRINTF(3, 4);
67
+ void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
68
+
69
+ static inline ssize_t
70
+ malloc_write_fd(int fd, const void *buf, size_t count) {
71
+ #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
72
+ /*
73
+ * Use syscall(2) rather than write(2) when possible in order to avoid
74
+ * the possibility of memory allocation within libc. This is necessary
75
+ * on FreeBSD; most operating systems do not have this problem though.
76
+ *
77
+ * syscall() returns long or int, depending on platform, so capture the
78
+ * result in the widest plausible type to avoid compiler warnings.
79
+ */
80
+ long result = syscall(SYS_write, fd, buf, count);
81
+ #else
82
+ ssize_t result = (ssize_t)write(fd, buf,
83
+ #ifdef _WIN32
84
+ (unsigned int)
85
+ #endif
86
+ count);
87
+ #endif
88
+ return (ssize_t)result;
89
+ }
90
+
91
+ static inline ssize_t
92
+ malloc_read_fd(int fd, void *buf, size_t count) {
93
+ #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
94
+ long result = syscall(SYS_read, fd, buf, count);
95
+ #else
96
+ ssize_t result = read(fd, buf,
97
+ #ifdef _WIN32
98
+ (unsigned int)
99
+ #endif
100
+ count);
101
+ #endif
102
+ return (ssize_t)result;
103
+ }
104
+
105
+ #endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mpsc_queue.h ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
2
+ #define JEMALLOC_INTERNAL_MPSC_QUEUE_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+
6
+ /*
7
+ * A concurrent implementation of a multi-producer, single-consumer queue. It
8
+ * supports three concurrent operations:
9
+ * - Push
10
+ * - Push batch
11
+ * - Pop batch
12
+ *
13
+ * These operations are all lock-free.
14
+ *
15
+ * The implementation is the simple two-stack queue built on a Treiber stack.
16
+ * It's not terribly efficient, but this isn't expected to go into anywhere with
17
+ * hot code. In fact, we don't really even need queue semantics in any
18
+ * anticipated use cases; we could get away with just the stack. But this way
19
+ * lets us frame the API in terms of the existing list types, which is a nice
20
+ * convenience. We can save on cache misses by introducing our own (parallel)
21
+ * single-linked list type here, and dropping FIFO semantics, if we need this to
22
+ * get faster. Since we're currently providing queue semantics though, we use
23
+ * the prev field in the link rather than the next field for Treiber-stack
24
+ * linkage, so that we can preserve order for bash-pushed lists (recall that the
25
+ * two-stack tricks reverses orders in the lock-free first stack).
26
+ */
27
+
28
+ #define mpsc_queue(a_type) \
29
+ struct { \
30
+ atomic_p_t tail; \
31
+ }
32
+
33
+ #define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
34
+ a_list_type) \
35
+ /* Initialize a queue. */ \
36
+ a_attr void \
37
+ a_prefix##new(a_queue_type *queue); \
38
+ /* Insert all items in src into the queue, clearing src. */ \
39
+ a_attr void \
40
+ a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
41
+ /* Insert node into the queue. */ \
42
+ a_attr void \
43
+ a_prefix##push(a_queue_type *queue, a_type *node); \
44
+ /* \
45
+ * Pop all items in the queue into the list at dst. dst should already \
46
+ * be initialized (and may contain existing items, which then remain \
47
+ * in dst). \
48
+ */ \
49
+ a_attr void \
50
+ a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
51
+
52
+ #define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
53
+ a_list_type, a_link) \
54
+ a_attr void \
55
+ a_prefix##new(a_queue_type *queue) { \
56
+ atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
57
+ } \
58
+ a_attr void \
59
+ a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
60
+ /* \
61
+ * Reuse the ql list next field as the Treiber stack next \
62
+ * field. \
63
+ */ \
64
+ a_type *first = ql_first(src); \
65
+ a_type *last = ql_last(src, a_link); \
66
+ void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
67
+ do { \
68
+ /* \
69
+ * Note that this breaks the queue ring structure; \
70
+ * it's not a ring any more! \
71
+ */ \
72
+ first->a_link.qre_prev = cur_tail; \
73
+ /* \
74
+ * Note: the upcoming CAS doesn't need an atomic; every \
75
+ * push only needs to synchronize with the next pop, \
76
+ * which we get from the release sequence rules. \
77
+ */ \
78
+ } while (!atomic_compare_exchange_weak_p(&queue->tail, \
79
+ &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
80
+ ql_new(src); \
81
+ } \
82
+ a_attr void \
83
+ a_prefix##push(a_queue_type *queue, a_type *node) { \
84
+ ql_elm_new(node, a_link); \
85
+ a_list_type list; \
86
+ ql_new(&list); \
87
+ ql_head_insert(&list, node, a_link); \
88
+ a_prefix##push_batch(queue, &list); \
89
+ } \
90
+ a_attr void \
91
+ a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
92
+ a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
93
+ if (tail == NULL) { \
94
+ /* \
95
+ * In the common special case where there are no \
96
+ * pending elements, bail early without a costly RMW. \
97
+ */ \
98
+ return; \
99
+ } \
100
+ tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
101
+ /* \
102
+ * It's a single-consumer queue, so if cur started non-NULL, \
103
+ * it'd better stay non-NULL. \
104
+ */ \
105
+ assert(tail != NULL); \
106
+ /* \
107
+ * We iterate through the stack and both fix up the link \
108
+ * structure (stack insertion broke the list requirement that \
109
+ * the list be circularly linked). It's just as efficient at \
110
+ * this point to make the queue a "real" queue, so do that as \
111
+ * well. \
112
+ * If this ever gets to be a hot spot, we can omit this fixup \
113
+ * and make the queue a bag (i.e. not necessarily ordered), but \
114
+ * that would mean jettisoning the existing list API as the \
115
+ * batch pushing/popping interface. \
116
+ */ \
117
+ a_list_type reversed; \
118
+ ql_new(&reversed); \
119
+ while (tail != NULL) { \
120
+ /* \
121
+ * Pop an item off the stack, prepend it onto the list \
122
+ * (reversing the order). Recall that we use the \
123
+ * list prev field as the Treiber stack next field to \
124
+ * preserve order of batch-pushed items when reversed. \
125
+ */ \
126
+ a_type *next = tail->a_link.qre_prev; \
127
+ ql_elm_new(tail, a_link); \
128
+ ql_head_insert(&reversed, tail, a_link); \
129
+ tail = next; \
130
+ } \
131
+ ql_concat(dst, &reversed, a_link); \
132
+ }
133
+
134
+ #endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mutex.h ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_MUTEX_H
2
+ #define JEMALLOC_INTERNAL_MUTEX_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/mutex_prof.h"
6
+ #include "jemalloc/internal/tsd.h"
7
+ #include "jemalloc/internal/witness.h"
8
+
9
+ extern int64_t opt_mutex_max_spin;
10
+
11
/* Locking discipline enforced (in debug builds) via the witness machinery. */
typedef enum {
	/* Can only acquire one mutex of a given witness rank at a time. */
	malloc_mutex_rank_exclusive,
	/*
	 * Can acquire multiple mutexes of the same witness rank, but in
	 * address-ascending order only.
	 */
	malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
20
+
21
typedef struct malloc_mutex_s malloc_mutex_t;
/*
 * jemalloc's internal mutex: a platform lock (SRWLOCK/CRITICAL_SECTION on
 * Windows, os_unfair_lock on Darwin, pthread_mutex_t elsewhere) bundled with
 * contention-profiling data and witness (lock-order) state.
 */
struct malloc_mutex_s {
	union {
		struct {
			/*
			 * prof_data is defined first to reduce cacheline
			 * bouncing: the data is not touched by the mutex holder
			 * during unlocking, while might be modified by
			 * contenders.  Having it before the mutex itself could
			 * avoid prefetching a modified cacheline (for the
			 * unlocking thread).
			 */
			mutex_prof_data_t prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
			SRWLOCK lock;
# else
			CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
			os_unfair_lock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
			pthread_mutex_t lock;
			/*
			 * NOTE(review): appears to chain mutexes whose init is
			 * postponed to an init callback — confirm in mutex.c.
			 */
			malloc_mutex_t *postponed_next;
#else
			pthread_mutex_t lock;
#endif
			/*
			 * Hint flag to avoid exclusive cache line contention
			 * during spin waiting: spinners poll this with relaxed
			 * loads instead of hammering the lock word itself.
			 */
			atomic_b_t locked;
		};
		/*
		 * We only touch witness when configured w/ debug.  However we
		 * keep the field in a union when !debug so that we don't have
		 * to pollute the code base with #ifdefs, while avoiding paying
		 * the memory cost.
		 */
#if !defined(JEMALLOC_DEBUG)
		witness_t witness;
		malloc_mutex_lock_order_t lock_order;
#endif
	};

#if defined(JEMALLOC_DEBUG)
	/* Real (non-overlaid) witness state when debug checking is enabled. */
	witness_t witness;
	malloc_mutex_lock_order_t lock_order;
#endif
};
71
+
72
+ #ifdef _WIN32
73
+ # if _WIN32_WINNT >= 0x0600
74
+ # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
75
+ # define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
76
+ # define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
77
+ # else
78
+ # define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
79
+ # define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
80
+ # define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
81
+ # endif
82
+ #elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
83
+ # define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
84
+ # define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
85
+ # define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
86
+ #else
87
+ # define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
88
+ # define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
89
+ # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
90
+ #endif
91
+
92
+ #define LOCK_PROF_DATA_INITIALIZER \
93
+ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
94
+ ATOMIC_INIT(0), 0, NULL, 0}
95
+
96
+ #ifdef _WIN32
97
+ # define MALLOC_MUTEX_INITIALIZER
98
+ #elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
99
+ # if defined(JEMALLOC_DEBUG)
100
+ # define MALLOC_MUTEX_INITIALIZER \
101
+ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
102
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
103
+ # else
104
+ # define MALLOC_MUTEX_INITIALIZER \
105
+ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
106
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
107
+ # endif
108
+ #elif (defined(JEMALLOC_MUTEX_INIT_CB))
109
+ # if (defined(JEMALLOC_DEBUG))
110
+ # define MALLOC_MUTEX_INITIALIZER \
111
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
112
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
113
+ # else
114
+ # define MALLOC_MUTEX_INITIALIZER \
115
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
116
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
117
+ # endif
118
+
119
+ #else
120
+ # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
121
+ # if defined(JEMALLOC_DEBUG)
122
+ # define MALLOC_MUTEX_INITIALIZER \
123
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
124
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
125
+ # else
126
+ # define MALLOC_MUTEX_INITIALIZER \
127
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
128
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
129
+ # endif
130
+ #endif
131
+
132
+ #ifdef JEMALLOC_LAZY_LOCK
133
+ extern bool isthreaded;
134
+ #else
135
+ # undef isthreaded /* Undo private_namespace.h definition. */
136
+ # define isthreaded true
137
+ #endif
138
+
139
+ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
140
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
141
+ void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
142
+ void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
143
+ void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
144
+ bool malloc_mutex_boot(void);
145
+ void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
146
+
147
+ void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
148
+
149
+ static inline void
150
+ malloc_mutex_lock_final(malloc_mutex_t *mutex) {
151
+ MALLOC_MUTEX_LOCK(mutex);
152
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
153
+ }
154
+
155
+ static inline bool
156
+ malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
157
+ return MALLOC_MUTEX_TRYLOCK(mutex);
158
+ }
159
+
160
+ static inline void
161
+ mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
162
+ if (config_stats) {
163
+ mutex_prof_data_t *data = &mutex->prof_data;
164
+ data->n_lock_ops++;
165
+ if (data->prev_owner != tsdn) {
166
+ data->prev_owner = tsdn;
167
+ data->n_owner_switches++;
168
+ }
169
+ }
170
+ }
171
+
172
+ /* Trylock: return false if the lock is successfully acquired. */
173
+ static inline bool
174
+ malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
175
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
176
+ if (isthreaded) {
177
+ if (malloc_mutex_trylock_final(mutex)) {
178
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
179
+ return true;
180
+ }
181
+ mutex_owner_stats_update(tsdn, mutex);
182
+ }
183
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
184
+
185
+ return false;
186
+ }
187
+
188
+ /* Aggregate lock prof data. */
189
+ static inline void
190
+ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
191
+ nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
192
+ if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
193
+ nstime_copy(&sum->max_wait_time, &data->max_wait_time);
194
+ }
195
+
196
+ sum->n_wait_times += data->n_wait_times;
197
+ sum->n_spin_acquired += data->n_spin_acquired;
198
+
199
+ if (sum->max_n_thds < data->max_n_thds) {
200
+ sum->max_n_thds = data->max_n_thds;
201
+ }
202
+ uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
203
+ ATOMIC_RELAXED);
204
+ uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
205
+ &data->n_waiting_thds, ATOMIC_RELAXED);
206
+ atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
207
+ ATOMIC_RELAXED);
208
+ sum->n_owner_switches += data->n_owner_switches;
209
+ sum->n_lock_ops += data->n_lock_ops;
210
+ }
211
+
212
+ static inline void
213
+ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
214
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
215
+ if (isthreaded) {
216
+ if (malloc_mutex_trylock_final(mutex)) {
217
+ malloc_mutex_lock_slow(mutex);
218
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
219
+ }
220
+ mutex_owner_stats_update(tsdn, mutex);
221
+ }
222
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
223
+ }
224
+
225
+ static inline void
226
+ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
227
+ atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
228
+ witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
229
+ if (isthreaded) {
230
+ MALLOC_MUTEX_UNLOCK(mutex);
231
+ }
232
+ }
233
+
234
+ static inline void
235
+ malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
236
+ witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
237
+ }
238
+
239
+ static inline void
240
+ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
241
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
242
+ }
243
+
244
+ static inline void
245
+ malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
246
+ /*
247
+ * Not *really* allowed (we shouldn't be doing non-atomic loads of
248
+ * atomic data), but the mutex protection makes this safe, and writing
249
+ * a member-for-member copy is tedious for this situation.
250
+ */
251
+ *dst = *source;
252
+ /* n_wait_thds is not reported (modified w/o locking). */
253
+ atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
254
+ }
255
+
256
+ /* Copy the prof data from mutex for processing. */
257
+ static inline void
258
+ malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
259
+ malloc_mutex_t *mutex) {
260
+ /* Can only read holding the mutex. */
261
+ malloc_mutex_assert_owner(tsdn, mutex);
262
+ malloc_mutex_prof_copy(data, &mutex->prof_data);
263
+ }
264
+
265
+ static inline void
266
+ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
267
+ malloc_mutex_t *mutex) {
268
+ mutex_prof_data_t *source = &mutex->prof_data;
269
+ /* Can only read holding the mutex. */
270
+ malloc_mutex_assert_owner(tsdn, mutex);
271
+
272
+ nstime_add(&data->tot_wait_time, &source->tot_wait_time);
273
+ if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
274
+ nstime_copy(&data->max_wait_time, &source->max_wait_time);
275
+ }
276
+ data->n_wait_times += source->n_wait_times;
277
+ data->n_spin_acquired += source->n_spin_acquired;
278
+ if (data->max_n_thds < source->max_n_thds) {
279
+ data->max_n_thds = source->max_n_thds;
280
+ }
281
+ /* n_wait_thds is not reported. */
282
+ atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
283
+ data->n_owner_switches += source->n_owner_switches;
284
+ data->n_lock_ops += source->n_lock_ops;
285
+ }
286
+
287
+ /* Compare the prof data and update to the maximum. */
288
+ static inline void
289
+ malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
290
+ malloc_mutex_t *mutex) {
291
+ mutex_prof_data_t *source = &mutex->prof_data;
292
+ /* Can only read holding the mutex. */
293
+ malloc_mutex_assert_owner(tsdn, mutex);
294
+
295
+ if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
296
+ nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
297
+ }
298
+ if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
299
+ nstime_copy(&data->max_wait_time, &source->max_wait_time);
300
+ }
301
+ if (source->n_wait_times > data->n_wait_times) {
302
+ data->n_wait_times = source->n_wait_times;
303
+ }
304
+ if (source->n_spin_acquired > data->n_spin_acquired) {
305
+ data->n_spin_acquired = source->n_spin_acquired;
306
+ }
307
+ if (source->max_n_thds > data->max_n_thds) {
308
+ data->max_n_thds = source->max_n_thds;
309
+ }
310
+ if (source->n_owner_switches > data->n_owner_switches) {
311
+ data->n_owner_switches = source->n_owner_switches;
312
+ }
313
+ if (source->n_lock_ops > data->n_lock_ops) {
314
+ data->n_lock_ops = source->n_lock_ops;
315
+ }
316
+ /* n_wait_thds is not reported. */
317
+ }
318
+
319
+ #endif /* JEMALLOC_INTERNAL_MUTEX_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/mutex_prof.h ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
2
+ #define JEMALLOC_INTERNAL_MUTEX_PROF_H
3
+
4
+ #include "jemalloc/internal/atomic.h"
5
+ #include "jemalloc/internal/nstime.h"
6
+ #include "jemalloc/internal/tsd_types.h"
7
+
8
+ #define MUTEX_PROF_GLOBAL_MUTEXES \
9
+ OP(background_thread) \
10
+ OP(max_per_bg_thd) \
11
+ OP(ctl) \
12
+ OP(prof) \
13
+ OP(prof_thds_data) \
14
+ OP(prof_dump) \
15
+ OP(prof_recent_alloc) \
16
+ OP(prof_recent_dump) \
17
+ OP(prof_stats)
18
+
19
+ typedef enum {
20
+ #define OP(mtx) global_prof_mutex_##mtx,
21
+ MUTEX_PROF_GLOBAL_MUTEXES
22
+ #undef OP
23
+ mutex_prof_num_global_mutexes
24
+ } mutex_prof_global_ind_t;
25
+
26
+ #define MUTEX_PROF_ARENA_MUTEXES \
27
+ OP(large) \
28
+ OP(extent_avail) \
29
+ OP(extents_dirty) \
30
+ OP(extents_muzzy) \
31
+ OP(extents_retained) \
32
+ OP(decay_dirty) \
33
+ OP(decay_muzzy) \
34
+ OP(base) \
35
+ OP(tcache_list) \
36
+ OP(hpa_shard) \
37
+ OP(hpa_shard_grow) \
38
+ OP(hpa_sec)
39
+
40
+ typedef enum {
41
+ #define OP(mtx) arena_prof_mutex_##mtx,
42
+ MUTEX_PROF_ARENA_MUTEXES
43
+ #undef OP
44
+ mutex_prof_num_arena_mutexes
45
+ } mutex_prof_arena_ind_t;
46
+
47
+ /*
48
+ * The fourth parameter is a boolean value that is true for derived rate counters
49
+ * and false for real ones.
50
+ */
51
+ #define MUTEX_PROF_UINT64_COUNTERS \
52
+ OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
53
+ OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
54
+ OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
55
+ OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
56
+ OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
57
+ OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
58
+ OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
59
+ OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
60
+ OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
61
+ OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
62
+ OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
63
+
64
+ #define MUTEX_PROF_UINT32_COUNTERS \
65
+ OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
66
+
67
+ #define MUTEX_PROF_COUNTERS \
68
+ MUTEX_PROF_UINT64_COUNTERS \
69
+ MUTEX_PROF_UINT32_COUNTERS
70
+
71
+ #define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
72
+
73
+ #define COUNTER_ENUM(counter_list, t) \
74
+ typedef enum { \
75
+ counter_list \
76
+ mutex_prof_num_##t##_counters \
77
+ } mutex_prof_##t##_counter_ind_t;
78
+
79
+ COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
80
+ COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
81
+
82
+ #undef COUNTER_ENUM
83
+ #undef OP
84
+
85
/*
 * Per-mutex contention profile; embedded in malloc_mutex_s (prof_data) and
 * also used stand-alone when merging/reporting stats.
 */
typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nano seconds) spent waiting on this mutex. */
	nstime_t tot_wait_time;
	/* Max time (in nano seconds) spent on a single lock operation. */
	nstime_t max_wait_time;
	/* # of times have to wait for this mutex (after spinning). */
	uint64_t n_wait_times;
	/* # of times acquired the mutex through local spinning. */
	uint64_t n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t max_n_thds;
	/* Current # of threads waiting on the lock.  Atomic synced. */
	atomic_u32_t n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so it's placed closest to the end (i.e. right before
	 * the lock) so that we have a higher chance of them being on the same
	 * cacheline.
	 */
	/* # of times the mutex holder is different than the previous one. */
	uint64_t n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t *prev_owner;
	/* # of lock() operations in total. */
	uint64_t n_lock_ops;
} mutex_prof_data_t;
116
+
117
+ #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/nstime.h ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_NSTIME_H
2
+ #define JEMALLOC_INTERNAL_NSTIME_H
3
+
4
+ /* Maximum supported number of seconds (~584 years). */
5
+ #define NSTIME_SEC_MAX KQU(18446744072)
6
+
7
+ #define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
8
+ #ifdef JEMALLOC_DEBUG
9
+ # define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
10
+ #else
11
+ # define NSTIME_ZERO_INITIALIZER {0}
12
+ #endif
13
+
14
/* A point in time or a duration, stored as a raw nanosecond count. */
typedef struct {
	/* Whole value, in nanoseconds. */
	uint64_t ns;
#ifdef JEMALLOC_DEBUG
	uint32_t magic; /* Tracks if initialized. */
#endif
} nstime_t;
20
+
21
+ static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
22
+
23
+ void nstime_init(nstime_t *time, uint64_t ns);
24
+ void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
25
+ uint64_t nstime_ns(const nstime_t *time);
26
+ uint64_t nstime_sec(const nstime_t *time);
27
+ uint64_t nstime_msec(const nstime_t *time);
28
+ uint64_t nstime_nsec(const nstime_t *time);
29
+ void nstime_copy(nstime_t *time, const nstime_t *source);
30
+ int nstime_compare(const nstime_t *a, const nstime_t *b);
31
+ void nstime_add(nstime_t *time, const nstime_t *addend);
32
+ void nstime_iadd(nstime_t *time, uint64_t addend);
33
+ void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
34
+ void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
35
+ void nstime_imultiply(nstime_t *time, uint64_t multiplier);
36
+ void nstime_idivide(nstime_t *time, uint64_t divisor);
37
+ uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
38
+ uint64_t nstime_ns_since(const nstime_t *past);
39
+
40
+ typedef bool (nstime_monotonic_t)(void);
41
+ extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
42
+
43
+ typedef void (nstime_update_t)(nstime_t *);
44
+ extern nstime_update_t *JET_MUTABLE nstime_update;
45
+
46
+ typedef void (nstime_prof_update_t)(nstime_t *);
47
+ extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
48
+
49
+ void nstime_init_update(nstime_t *time);
50
+ void nstime_prof_init_update(nstime_t *time);
51
+
52
/*
 * Resolution of the clock used for profiling timestamps (see
 * nstime_prof_update and opt_prof_time_res).
 */
enum prof_time_res_e {
	prof_time_res_default = 0,
	prof_time_res_high = 1
};
typedef enum prof_time_res_e prof_time_res_t;
57
+
58
+ extern prof_time_res_t opt_prof_time_res;
59
+ extern const char *prof_time_res_mode_names[];
60
+
61
/*
 * Initialize *time to zero; in debug builds this also stamps it as
 * initialized, since nstime_zero carries NSTIME_MAGIC.
 */
JEMALLOC_ALWAYS_INLINE void
nstime_init_zero(nstime_t *time) {
	nstime_copy(time, &nstime_zero);
}
65
+
66
+ JEMALLOC_ALWAYS_INLINE bool
67
+ nstime_equals_zero(nstime_t *time) {
68
+ int diff = nstime_compare(time, &nstime_zero);
69
+ assert(diff >= 0);
70
+ return diff == 0;
71
+ }
72
+
73
+ #endif /* JEMALLOC_INTERNAL_NSTIME_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pa.h ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PA_H
2
+ #define JEMALLOC_INTERNAL_PA_H
3
+
4
+ #include "jemalloc/internal/base.h"
5
+ #include "jemalloc/internal/decay.h"
6
+ #include "jemalloc/internal/ecache.h"
7
+ #include "jemalloc/internal/edata_cache.h"
8
+ #include "jemalloc/internal/emap.h"
9
+ #include "jemalloc/internal/hpa.h"
10
+ #include "jemalloc/internal/lockedint.h"
11
+ #include "jemalloc/internal/pac.h"
12
+ #include "jemalloc/internal/pai.h"
13
+ #include "jemalloc/internal/sec.h"
14
+
15
+ /*
16
+ * The page allocator; responsible for acquiring pages of memory for
17
+ * allocations. It picks the implementation of the page allocator interface
18
+ * (i.e. a pai_t) to handle a given page-level allocation request. For now, the
19
+ * only such implementation is the PAC code ("page allocator classic"), but
20
+ * others will be coming soon.
21
+ */
22
+
23
typedef struct pa_central_s pa_central_t;
/*
 * Cross-shard page-allocator state; every pa_shard_t keeps a pointer back to
 * the pa_central_t it is associated with.
 */
struct pa_central_s {
	/* Central hugepage-allocator (HPA) state. */
	hpa_central_t hpa;
};
27
+
28
+ /*
29
+ * The stats for a particular pa_shard. Because of the way the ctl module
30
+ * handles stats epoch data collection (it has its own arena_stats, and merges
31
+ * the stats from each arena into it), this needs to live in the arena_stats_t;
32
+ * hence we define it here and let the pa_shard have a pointer (rather than the
33
+ * more natural approach of just embedding it in the pa_shard itself).
34
+ *
35
+ * We follow the arena_stats_t approach of marking the derived fields. These
36
+ * are the ones that are not maintained on their own; instead, their values are
37
+ * derived during those stats merges.
38
+ */
39
typedef struct pa_shard_stats_s pa_shard_stats_t;
struct pa_shard_stats_s {
	/*
	 * Number of edata_t structs allocated by base, but not being used.
	 * "Derived" here means it is filled in during stats merges rather
	 * than maintained incrementally.
	 */
	size_t edata_avail; /* Derived. */
	/*
	 * Stats specific to the PAC.  For now, these are the only stats that
	 * exist, but there will eventually be other page allocators.  Things
	 * like edata_avail make sense in a cross-PA sense, but things like
	 * npurges don't.
	 */
	pac_stats_t pac_stats;
};
51
+
52
+ /*
53
+ * The local allocator handle. Keeps the state necessary to satisfy page-sized
54
+ * allocations.
55
+ *
56
+ * The contents are mostly internal to the PA module. The key exception is that
57
+ * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
58
+ * decay_ts, for a couple of queries, passing them back to a PA function, or
59
+ * acquiring decay.mtx and looking at decay.purging. The reasoning is that,
60
+ * while PA decides what and how to purge, the arena code decides when and where
61
+ * (e.g. on what thread). It's allowed to use the presence of another purger to
62
+ * decide.
63
+ * (The background thread code also touches some other decay internals, but
64
+ * that's not fundamental; it's just an artifact of a partial refactoring, and
65
+ * its accesses could be straightforwardly moved inside the decay module).
66
+ */
67
typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s {
	/* The central PA this shard is associated with. */
	pa_central_t *central;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t nactive;

	/*
	 * Whether or not we should prefer the hugepage allocator.  Atomic
	 * since it may be concurrently modified by a thread setting extent
	 * hooks.  Note that we still may do HPA operations in this arena; if
	 * use_hpa is changed from true to false, we'll free back to the
	 * hugepage allocator for those allocations.
	 */
	atomic_b_t use_hpa;

	/*
	 * If we never used the HPA to begin with, it wasn't initialized, and
	 * so we shouldn't try to e.g. acquire its mutexes during fork.  This
	 * tracks that knowledge.
	 */
	bool ever_used_hpa;

	/* Allocates from a PAC. */
	pac_t pac;

	/*
	 * We place a small extent cache in front of the HPA, since we intend
	 * these configurations to use many fewer arenas, and therefore have a
	 * higher risk of hot locks.
	 */
	sec_t hpa_sec;
	hpa_shard_t hpa_shard;

	/* The source of edata_t objects. */
	edata_cache_t edata_cache;

	/* Shard index, supplied at pa_shard_init time. */
	unsigned ind;

	/* Stats for this shard, and the mutex guarding them. */
	malloc_mutex_t *stats_mtx;
	pa_shard_stats_t *stats;

	/* The emap this shard is tied to. */
	emap_t *emap;

	/* The base from which we get the ehooks and allocate metadata. */
	base_t *base;
};
120
+
121
+ static inline bool
122
+ pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
123
+ return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
124
+ pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
125
+ }
126
+
127
+ static inline ehooks_t *
128
+ pa_shard_ehooks_get(pa_shard_t *shard) {
129
+ return base_ehooks_get(shard->base);
130
+ }
131
+
132
+ /* Returns true on error. */
133
+ bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
134
+ hpa_hooks_t *hpa_hooks);
135
+
136
+ /* Returns true on error. */
137
+ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
138
+ emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
139
+ malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
140
+ ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
141
+
142
+ /*
143
+ * This isn't exposed to users; we allow late enablement of the HPA shard so
144
+ * that we can boot without worrying about the HPA, then turn it on in a0.
145
+ */
146
+ bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
147
+ const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
148
+
149
+ /*
150
+ * We stop using the HPA when custom extent hooks are installed, but still
151
+ * redirect deallocations to it.
152
+ */
153
+ void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
154
+
155
+ /*
156
+ * This does the PA-specific parts of arena reset (i.e. freeing all active
157
+ * allocations).
158
+ */
159
+ void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
160
+
161
+ /*
162
+ * Destroy all the remaining retained extents. Should only be called after
163
+ * decaying all active, dirty, and muzzy extents to the retained state, as the
164
+ * last step in destroying the shard.
165
+ */
166
+ void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
167
+
168
+ /* Gets an edata for the given allocation. */
169
+ edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
170
+ size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
171
+ bool *deferred_work_generated);
172
+ /* Returns true on error, in which case nothing changed. */
173
+ bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
174
+ size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
175
+ /*
176
+ * The same. Sets *generated_dirty to true if we produced new dirty pages, and
177
+ * false otherwise.
178
+ */
179
+ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
180
+ size_t new_size, szind_t szind, bool *deferred_work_generated);
181
+ /*
182
+ * Frees the given edata back to the pa. Sets *generated_dirty if we produced
183
+ * new dirty pages (well, we always set it for now; but this need not be the
184
+ * case).
185
+ * (We could make generated_dirty the return value of course, but this is more
186
+ * consistent with the shrink pathway and our error codes here).
187
+ */
188
+ void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
189
+ bool *deferred_work_generated);
190
+ bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
191
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness);
192
+ ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
193
+
194
+ /*
195
+ * Do deferred work on this PA shard.
196
+ *
197
+ * Morally, this should do both PAC decay and the HPA deferred work. For now,
198
+ * though, the arena, background thread, and PAC modules are tightly interwoven
199
+ * in a way that's tricky to extricate, so we only do the HPA-specific parts.
200
+ */
201
+ void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
202
+ bool deferral_allowed);
203
+ void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
204
+ void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
205
+ uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
206
+
207
+ /******************************************************************************/
208
+ /*
209
+ * Various bits of "boring" functionality that are still part of this module,
210
+ * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
211
+ * readable as possible.
212
+ */
213
+
214
+ /*
215
+ * These fork phases are synchronized with the arena fork phase numbering to
216
+ * make it easy to keep straight. That's why there's no prefork1.
217
+ */
218
+ void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
219
+ void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
220
+ void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
221
+ void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
222
+ void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
223
+ void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
224
+ void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
225
+
226
+ void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
227
+ size_t *ndirty, size_t *nmuzzy);
228
+
229
+ void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
230
+ pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
231
+ hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
232
+ size_t *resident);
233
+
234
+ /*
235
+ * Reads the PA-owned mutex stats into the output stats array, at the
236
+ * appropriate positions. Morally, these stats should really live in
237
+ * pa_shard_stats_t, but the indices are sort of baked into the various mutex
238
+ * prof macros. This would be a good thing to do at some point.
239
+ */
240
+ void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
241
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);
242
+
243
+ #endif /* JEMALLOC_INTERNAL_PA_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pac.h ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PAC_H
2
+ #define JEMALLOC_INTERNAL_PAC_H
3
+
4
+ #include "jemalloc/internal/exp_grow.h"
5
+ #include "jemalloc/internal/pai.h"
6
+ #include "san_bump.h"
7
+
8
+
9
+ /*
10
+ * Page allocator classic; an implementation of the PAI interface that:
11
+ * - Can be used for arenas with custom extent hooks.
12
+ * - Can always satisfy any allocation request (including highly-fragmentary
13
+ * ones).
14
+ * - Can use efficient OS-level zeroing primitives for demand-filled pages.
15
+ */
16
+
17
+ /* How "eager" decay/purging should be. */
18
+ enum pac_purge_eagerness_e {
19
+ PAC_PURGE_ALWAYS,
20
+ PAC_PURGE_NEVER,
21
+ PAC_PURGE_ON_EPOCH_ADVANCE
22
+ };
23
+ typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
24
+
25
+ typedef struct pac_decay_stats_s pac_decay_stats_t;
26
+ struct pac_decay_stats_s {
27
+ /* Total number of purge sweeps. */
28
+ locked_u64_t npurge;
29
+ /* Total number of madvise calls made. */
30
+ locked_u64_t nmadvise;
31
+ /* Total number of pages purged. */
32
+ locked_u64_t purged;
33
+ };
34
+
35
+ typedef struct pac_estats_s pac_estats_t;
36
+ struct pac_estats_s {
37
+ /*
38
+ * Stats for a given index in the range [0, SC_NPSIZES] in the various
39
+ * ecache_ts.
40
+ * We track both bytes and # of extents: two extents in the same bucket
41
+ * may have different sizes if adjacent size classes differ by more than
42
+ * a page, so bytes cannot always be derived from # of extents.
43
+ */
44
+ size_t ndirty;
45
+ size_t dirty_bytes;
46
+ size_t nmuzzy;
47
+ size_t muzzy_bytes;
48
+ size_t nretained;
49
+ size_t retained_bytes;
50
+ };
51
+
52
+ typedef struct pac_stats_s pac_stats_t;
53
+ struct pac_stats_s {
54
+ pac_decay_stats_t decay_dirty;
55
+ pac_decay_stats_t decay_muzzy;
56
+
57
+ /*
58
+ * Number of unused virtual memory bytes currently retained. Retained
59
+ * bytes are technically mapped (though always decommitted or purged),
60
+ * but they are excluded from the mapped statistic (above).
61
+ */
62
+ size_t retained; /* Derived. */
63
+
64
+ /*
65
+ * Number of bytes currently mapped, excluding retained memory (and any
66
+ * base-allocated memory, which is tracked by the arena stats).
67
+ *
68
+ * We name this "pac_mapped" to avoid confusion with the arena_stats
69
+ * "mapped".
70
+ */
71
+ atomic_zu_t pac_mapped;
72
+
73
+ /* VM space had to be leaked (undocumented). Normally 0. */
74
+ atomic_zu_t abandoned_vm;
75
+ };
76
+
77
+ typedef struct pac_s pac_t;
78
+ struct pac_s {
79
+ /*
80
+ * Must be the first member (we convert it to a PAC given only a
81
+ * pointer). The handle to the allocation interface.
82
+ */
83
+ pai_t pai;
84
+ /*
85
+ * Collections of extents that were previously allocated. These are
86
+ * used when allocating extents, in an attempt to re-use address space.
87
+ *
88
+ * Synchronization: internal.
89
+ */
90
+ ecache_t ecache_dirty;
91
+ ecache_t ecache_muzzy;
92
+ ecache_t ecache_retained;
93
+
94
+ base_t *base;
95
+ emap_t *emap;
96
+ edata_cache_t *edata_cache;
97
+
98
+ /* The grow info for the retained ecache. */
99
+ exp_grow_t exp_grow;
100
+ malloc_mutex_t grow_mtx;
101
+
102
+ /* Special allocator for guarded frequently reused extents. */
103
+ san_bump_alloc_t sba;
104
+
105
+ /* How large extents should be before getting auto-purged. */
106
+ atomic_zu_t oversize_threshold;
107
+
108
+ /*
109
+ * Decay-based purging state, responsible for scheduling extent state
110
+ * transitions.
111
+ *
112
+ * Synchronization: via the internal mutex.
113
+ */
114
+ decay_t decay_dirty; /* dirty --> muzzy */
115
+ decay_t decay_muzzy; /* muzzy --> retained */
116
+
117
+ malloc_mutex_t *stats_mtx;
118
+ pac_stats_t *stats;
119
+
120
+ /* Extent serial number generator state. */
121
+ atomic_zu_t extent_sn_next;
122
+ };
123
+
124
+ bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
125
+ edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
126
+ ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
127
+ malloc_mutex_t *stats_mtx);
128
+
129
+ static inline size_t
130
+ pac_mapped(pac_t *pac) {
131
+ return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
132
+ }
133
+
134
+ static inline ehooks_t *
135
+ pac_ehooks_get(pac_t *pac) {
136
+ return base_ehooks_get(pac->base);
137
+ }
138
+
139
+ /*
140
+ * All purging functions require holding decay->mtx. This is one of the few
141
+ * places external modules are allowed to peek inside pa_shard_t internals.
142
+ */
143
+
144
+ /*
145
+ * Decays the number of pages currently in the ecache. This might not leave the
146
+ * ecache empty if other threads are inserting dirty objects into it
147
+ * concurrently with the call.
148
+ */
149
+ void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
150
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
151
+ /*
152
+ * Updates decay settings for the current time, and conditionally purges in
153
+ * response (depending on decay_purge_setting). Returns whether or not the
154
+ * epoch advanced.
155
+ */
156
+ bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
157
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
158
+ pac_purge_eagerness_t eagerness);
159
+
160
+ /*
161
+ * Gets / sets the maximum amount that we'll grow an arena down the
162
+ * grow-retained pathways (unless forced to by an allocaction request).
163
+ *
164
+ * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't
165
+ * care about the previous value.
166
+ *
167
+ * Returns true on error (if the new limit is not valid).
168
+ */
169
+ bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
170
+ size_t *new_limit);
171
+
172
+ bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
173
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness);
174
+ ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
175
+
176
+ void pac_reset(tsdn_t *tsdn, pac_t *pac);
177
+ void pac_destroy(tsdn_t *tsdn, pac_t *pac);
178
+
179
+ #endif /* JEMALLOC_INTERNAL_PAC_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pages.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
2
+ #define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
3
+
4
+ /* Page size. LG_PAGE is determined by the configure script. */
5
+ #ifdef PAGE_MASK
6
+ # undef PAGE_MASK
7
+ #endif
8
+ #define PAGE ((size_t)(1U << LG_PAGE))
9
+ #define PAGE_MASK ((size_t)(PAGE - 1))
10
+ /* Return the page base address for the page containing address a. */
11
+ #define PAGE_ADDR2BASE(a) \
12
+ ((void *)((uintptr_t)(a) & ~PAGE_MASK))
13
+ /* Return the smallest pagesize multiple that is >= s. */
14
+ #define PAGE_CEILING(s) \
15
+ (((s) + PAGE_MASK) & ~PAGE_MASK)
16
+ /* Return the largest pagesize multiple that is <=s. */
17
+ #define PAGE_FLOOR(s) \
18
+ ((s) & ~PAGE_MASK)
19
+
20
+ /* Huge page size. LG_HUGEPAGE is determined by the configure script. */
21
+ #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
22
+ #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
23
+
24
+ #if LG_HUGEPAGE != 0
25
+ # define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
26
+ #else
27
+ /*
28
+ * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
29
+ * we can't autodetect the hugepage size, it gets treated as 0, in which case
30
+ * we'll trigger a compiler error in those arrays. Avoid this case by ensuring
31
+ * that this value is at least 1. (We won't ever run in this degraded state;
32
+ * hpa_supported() returns false in this case.
33
+ */
34
+ # define HUGEPAGE_PAGES 1
35
+ #endif
36
+
37
+ /* Return the huge page base address for the huge page containing address a. */
38
+ #define HUGEPAGE_ADDR2BASE(a) \
39
+ ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
40
+ /* Return the smallest pagesize multiple that is >= s. */
41
+ #define HUGEPAGE_CEILING(s) \
42
+ (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
43
+
44
+ /* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
45
+ #if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
46
+ # define PAGES_CAN_PURGE_LAZY
47
+ #endif
48
+ /*
49
+ * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
50
+ *
51
+ * The only supported way to hard-purge on Windows is to decommit and then
52
+ * re-commit, but doing so is racy, and if re-commit fails it's a pain to
53
+ * propagate the "poisoned" memory state. Since we typically decommit as the
54
+ * next step after purging on Windows anyway, there's no point in adding such
55
+ * complexity.
56
+ */
57
+ #if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
58
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
59
+ defined(JEMALLOC_MAPS_COALESCE))
60
+ # define PAGES_CAN_PURGE_FORCED
61
+ #endif
62
+
63
+ static const bool pages_can_purge_lazy =
64
+ #ifdef PAGES_CAN_PURGE_LAZY
65
+ true
66
+ #else
67
+ false
68
+ #endif
69
+ ;
70
+ static const bool pages_can_purge_forced =
71
+ #ifdef PAGES_CAN_PURGE_FORCED
72
+ true
73
+ #else
74
+ false
75
+ #endif
76
+ ;
77
+
78
+ #if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
79
+ # define PAGES_CAN_HUGIFY
80
+ #endif
81
+
82
+ static const bool pages_can_hugify =
83
+ #ifdef PAGES_CAN_HUGIFY
84
+ true
85
+ #else
86
+ false
87
+ #endif
88
+ ;
89
+
90
+ typedef enum {
91
+ thp_mode_default = 0, /* Do not change hugepage settings. */
92
+ thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
93
+ thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
94
+
95
+ thp_mode_names_limit = 3, /* Used for option processing. */
96
+ thp_mode_not_supported = 3 /* No THP support detected. */
97
+ } thp_mode_t;
98
+
99
+ #define THP_MODE_DEFAULT thp_mode_default
100
+ extern thp_mode_t opt_thp;
101
+ extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
102
+ extern const char *thp_mode_names[];
103
+
104
+ void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
105
+ void pages_unmap(void *addr, size_t size);
106
+ bool pages_commit(void *addr, size_t size);
107
+ bool pages_decommit(void *addr, size_t size);
108
+ bool pages_purge_lazy(void *addr, size_t size);
109
+ bool pages_purge_forced(void *addr, size_t size);
110
+ bool pages_huge(void *addr, size_t size);
111
+ bool pages_nohuge(void *addr, size_t size);
112
+ bool pages_dontdump(void *addr, size_t size);
113
+ bool pages_dodump(void *addr, size_t size);
114
+ bool pages_boot(void);
115
+ void pages_set_thp_state (void *ptr, size_t size);
116
+ void pages_mark_guards(void *head, void *tail);
117
+ void pages_unmark_guards(void *head, void *tail);
118
+
119
+ #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/pai.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PAI_H
2
+ #define JEMALLOC_INTERNAL_PAI_H
3
+
4
+ /* An interface for page allocation. */
5
+
6
+ typedef struct pai_s pai_t;
7
+ struct pai_s {
8
+ /* Returns NULL on failure. */
9
+ edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
10
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
11
+ bool *deferred_work_generated);
12
+ /*
13
+ * Returns the number of extents added to the list (which may be fewer
14
+ * than requested, in case of OOM). The list should already be
15
+ * initialized. The only alignment guarantee is page-alignment, and
16
+ * the results are not necessarily zeroed.
17
+ */
18
+ size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
19
+ size_t nallocs, edata_list_active_t *results,
20
+ bool *deferred_work_generated);
21
+ bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
22
+ size_t old_size, size_t new_size, bool zero,
23
+ bool *deferred_work_generated);
24
+ bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
25
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
26
+ void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
27
+ bool *deferred_work_generated);
28
+ /* This function empties out list as a side-effect of being called. */
29
+ void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
30
+ edata_list_active_t *list, bool *deferred_work_generated);
31
+ uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
32
+ };
33
+
34
+ /*
35
+ * These are just simple convenience functions to avoid having to reference the
36
+ * same pai_t twice on every invocation.
37
+ */
38
+
39
+ static inline edata_t *
40
+ pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
41
+ bool zero, bool guarded, bool frequent_reuse,
42
+ bool *deferred_work_generated) {
43
+ return self->alloc(tsdn, self, size, alignment, zero, guarded,
44
+ frequent_reuse, deferred_work_generated);
45
+ }
46
+
47
+ static inline size_t
48
+ pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
49
+ edata_list_active_t *results, bool *deferred_work_generated) {
50
+ return self->alloc_batch(tsdn, self, size, nallocs, results,
51
+ deferred_work_generated);
52
+ }
53
+
54
+ static inline bool
55
+ pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
56
+ size_t new_size, bool zero, bool *deferred_work_generated) {
57
+ return self->expand(tsdn, self, edata, old_size, new_size, zero,
58
+ deferred_work_generated);
59
+ }
60
+
61
+ static inline bool
62
+ pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
63
+ size_t new_size, bool *deferred_work_generated) {
64
+ return self->shrink(tsdn, self, edata, old_size, new_size,
65
+ deferred_work_generated);
66
+ }
67
+
68
+ static inline void
69
+ pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
70
+ bool *deferred_work_generated) {
71
+ self->dalloc(tsdn, self, edata, deferred_work_generated);
72
+ }
73
+
74
+ static inline void
75
+ pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
76
+ bool *deferred_work_generated) {
77
+ self->dalloc_batch(tsdn, self, list, deferred_work_generated);
78
+ }
79
+
80
+ static inline uint64_t
81
+ pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
82
+ return self->time_until_deferred_work(tsdn, self);
83
+ }
84
+
85
+ /*
86
+ * An implementation of batch allocation that simply calls alloc once for
87
+ * each item in the list.
88
+ */
89
+ size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
90
+ size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
91
+ /* Ditto, for dalloc. */
92
+ void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
93
+ edata_list_active_t *list, bool *deferred_work_generated);
94
+
95
+ #endif /* JEMALLOC_INTERNAL_PAI_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/peak.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PEAK_H
2
+ #define JEMALLOC_INTERNAL_PEAK_H
3
+
4
+ typedef struct peak_s peak_t;
5
+ struct peak_s {
6
+ /* The highest recorded peak value, after adjustment (see below). */
7
+ uint64_t cur_max;
8
+ /*
9
+ * The difference between alloc and dalloc at the last set_zero call;
10
+ * this lets us cancel out the appropriate amount of excess.
11
+ */
12
+ uint64_t adjustment;
13
+ };
14
+
15
+ #define PEAK_INITIALIZER {0, 0}
16
+
17
+ static inline uint64_t
18
+ peak_max(peak_t *peak) {
19
+ return peak->cur_max;
20
+ }
21
+
22
+ static inline void
23
+ peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
24
+ int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
25
+ if (candidate_max > (int64_t)peak->cur_max) {
26
+ peak->cur_max = candidate_max;
27
+ }
28
+ }
29
+
30
+ /* Resets the counter to zero; all peaks are now relative to this point. */
31
+ static inline void
32
+ peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
33
+ peak->cur_max = 0;
34
+ peak->adjustment = alloc - dalloc;
35
+ }
36
+
37
+ #endif /* JEMALLOC_INTERNAL_PEAK_H */
platform/dbops/binaries/redis/src/deps/jemalloc/include/jemalloc/internal/peak_event.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
2
+ #define JEMALLOC_INTERNAL_PEAK_EVENT_H
3
+
4
+ /*
5
+ * While peak.h contains the simple helper struct that tracks state, this
6
+ * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
7
+ */
8
+
9
+ /* Update the peak with current tsd state. */
10
+ void peak_event_update(tsd_t *tsd);
11
+ /* Set current state to zero. */
12
+ void peak_event_zero(tsd_t *tsd);
13
+ uint64_t peak_event_max(tsd_t *tsd);
14
+
15
+ /* Manual hooks. */
16
+ /* The activity-triggered hooks. */
17
+ uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
18
+ uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
19
+ void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
20
+ uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
21
+ uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
22
+ void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
23
+
24
+ #endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */