title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Remove NpyDateTimeToEpoch | diff --git a/pandas/_libs/include/pandas/datetime/date_conversions.h b/pandas/_libs/include/pandas/datetime/date_conversions.h
index 42a16f33cc2ea..9a4a02ea89b4d 100644
--- a/pandas/_libs/include/pandas/datetime/date_conversions.h
+++ b/pandas/_libs/include/pandas/datetime/date_conversions.h
@@ -21,8 +21,4 @@ int scaleNanosecToUnit(npy_int64 *value, NPY_DATETIMEUNIT unit);
char *int64ToIso(int64_t value, NPY_DATETIMEUNIT valueUnit,
NPY_DATETIMEUNIT base, size_t *len);
-// TODO(username): this function doesn't do a lot; should augment or
-// replace with scaleNanosecToUnit
-npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base);
-
char *int64ToIsoDuration(int64_t value, size_t *len);
diff --git a/pandas/_libs/include/pandas/datetime/pd_datetime.h b/pandas/_libs/include/pandas/datetime/pd_datetime.h
index 714d264924750..7674fbbe743fe 100644
--- a/pandas/_libs/include/pandas/datetime/pd_datetime.h
+++ b/pandas/_libs/include/pandas/datetime/pd_datetime.h
@@ -35,7 +35,6 @@ typedef struct {
const npy_datetimestruct *);
int (*scaleNanosecToUnit)(npy_int64 *, NPY_DATETIMEUNIT);
char *(*int64ToIso)(int64_t, NPY_DATETIMEUNIT, NPY_DATETIMEUNIT, size_t *);
- npy_datetime (*NpyDateTimeToEpoch)(npy_datetime, NPY_DATETIMEUNIT);
char *(*PyDateTimeToIso)(PyObject *, NPY_DATETIMEUNIT, size_t *);
npy_datetime (*PyDateTimeToEpoch)(PyObject *, NPY_DATETIMEUNIT);
char *(*int64ToIsoDuration)(int64_t, size_t *);
diff --git a/pandas/_libs/src/datetime/date_conversions.c b/pandas/_libs/src/datetime/date_conversions.c
index 4b172349de8d3..7eaf8aad12f43 100644
--- a/pandas/_libs/src/datetime/date_conversions.c
+++ b/pandas/_libs/src/datetime/date_conversions.c
@@ -69,11 +69,6 @@ char *int64ToIso(int64_t value, NPY_DATETIMEUNIT valueUnit,
return result;
}
-npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base) {
- scaleNanosecToUnit(&dt, base);
- return dt;
-}
-
/* Converts the int64_t representation of a duration to ISO; mutates len */
char *int64ToIsoDuration(int64_t value, size_t *len) {
pandas_timedeltastruct tds;
diff --git a/pandas/_libs/src/datetime/pd_datetime.c b/pandas/_libs/src/datetime/pd_datetime.c
index b201023114f8a..030d734aeab21 100644
--- a/pandas/_libs/src/datetime/pd_datetime.c
+++ b/pandas/_libs/src/datetime/pd_datetime.c
@@ -171,14 +171,21 @@ static npy_datetime PyDateTimeToEpoch(PyObject *dt, NPY_DATETIMEUNIT base) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
"Could not convert PyDateTime to numpy datetime");
+
+ return -1;
}
- // TODO(username): is setting errMsg required?
- // ((JSONObjectEncoder *)tc->encoder)->errorMsg = "";
- // return NULL;
}
npy_datetime npy_dt = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts);
- return NpyDateTimeToEpoch(npy_dt, base);
+ if (scaleNanosecToUnit(&npy_dt, base) == -1) {
+ PyErr_Format(PyExc_ValueError,
+ "Call to scaleNanosecToUnit with value %" NPY_DATETIME_FMT
+ " and base %d failed",
+ npy_dt, base);
+
+ return -1;
+ }
+ return npy_dt;
}
static int pandas_datetime_exec(PyObject *module) {
@@ -191,7 +198,6 @@ static int pandas_datetime_exec(PyObject *module) {
capi->npy_datetimestruct_to_datetime = npy_datetimestruct_to_datetime;
capi->scaleNanosecToUnit = scaleNanosecToUnit;
capi->int64ToIso = int64ToIso;
- capi->NpyDateTimeToEpoch = NpyDateTimeToEpoch;
capi->PyDateTimeToIso = PyDateTimeToIso;
capi->PyDateTimeToEpoch = PyDateTimeToEpoch;
capi->int64ToIsoDuration = int64ToIsoDuration;
diff --git a/pandas/_libs/src/vendored/ujson/python/objToJSON.c b/pandas/_libs/src/vendored/ujson/python/objToJSON.c
index 6271791fe201e..9f1c1d3f857d1 100644
--- a/pandas/_libs/src/vendored/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/vendored/ujson/python/objToJSON.c
@@ -1286,8 +1286,12 @@ static char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
} else {
int size_of_cLabel = 21; // 21 chars for int 64
cLabel = PyObject_Malloc(size_of_cLabel);
- snprintf(cLabel, size_of_cLabel, "%" NPY_DATETIME_FMT,
- NpyDateTimeToEpoch(i8date, base));
+ if (scaleNanosecToUnit(&i8date, base) == -1) {
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+ snprintf(cLabel, size_of_cLabel, "%" NPY_DATETIME_FMT, i8date);
len = strlen(cLabel);
}
}
@@ -1373,7 +1377,7 @@ static void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->prv = pc;
if (PyTypeNum_ISDATETIME(enc->npyType)) {
- const int64_t longVal = *(npy_int64 *)enc->npyValue;
+ int64_t longVal = *(npy_int64 *)enc->npyValue;
if (longVal == get_nat()) {
tc->type = JT_NULL;
} else {
@@ -1389,7 +1393,10 @@ static void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->type = JT_UTF8;
} else {
NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
- pc->longValue = NpyDateTimeToEpoch(longVal, base);
+ if (scaleNanosecToUnit(&longVal, base) == -1) {
+ goto INVALID;
+ }
+ pc->longValue = longVal;
tc->type = JT_LONG;
}
}
| Duplicative of scaleNanosecToUnit but swallows errors | https://api.github.com/repos/pandas-dev/pandas/pulls/56276 | 2023-12-01T03:38:23Z | 2023-12-01T18:34:38Z | 2023-12-01T18:34:38Z | 2023-12-01T18:34:45Z |
Remove inline_helper.h header file | diff --git a/pandas/_libs/include/pandas/inline_helper.h b/pandas/_libs/include/pandas/inline_helper.h
deleted file mode 100644
index 1e03da1327470..0000000000000
--- a/pandas/_libs/include/pandas/inline_helper.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright (c) 2016, PyData Development Team
-All rights reserved.
-
-Distributed under the terms of the BSD Simplified License.
-
-The full license is in the LICENSE file, distributed with this software.
-*/
-
-#pragma once
-
-#ifndef PANDAS_INLINE
-#if defined(__clang__)
-#define PANDAS_INLINE static __inline__ __attribute__((__unused__))
-#elif defined(__GNUC__)
-#define PANDAS_INLINE static __inline__
-#elif defined(_MSC_VER)
-#define PANDAS_INLINE static __inline
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-#define PANDAS_INLINE static inline
-#else
-#define PANDAS_INLINE
-#endif // __GNUC__
-#endif // PANDAS_INLINE
diff --git a/pandas/_libs/include/pandas/parser/tokenizer.h b/pandas/_libs/include/pandas/parser/tokenizer.h
index 6a46ad637a401..f7e36a2f05a70 100644
--- a/pandas/_libs/include/pandas/parser/tokenizer.h
+++ b/pandas/_libs/include/pandas/parser/tokenizer.h
@@ -18,7 +18,6 @@ See LICENSE for the license
#define ERROR_OVERFLOW 2
#define ERROR_INVALID_CHARS 3
-#include "pandas/inline_helper.h"
#include "pandas/portable.h"
#include <stdint.h>
diff --git a/pandas/_libs/include/pandas/skiplist.h b/pandas/_libs/include/pandas/skiplist.h
index d002dba193279..57e304b3a66c0 100644
--- a/pandas/_libs/include/pandas/skiplist.h
+++ b/pandas/_libs/include/pandas/skiplist.h
@@ -15,13 +15,12 @@ Python recipe (https://rhettinger.wordpress.com/2010/02/06/lost-knowledge/)
#pragma once
-#include "pandas/inline_helper.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-PANDAS_INLINE float __skiplist_nanf(void) {
+static inline float __skiplist_nanf(void) {
const union {
int __i;
float __f;
@@ -30,7 +29,7 @@ PANDAS_INLINE float __skiplist_nanf(void) {
}
#define PANDAS_NAN ((double)__skiplist_nanf())
-PANDAS_INLINE double Log2(double val) { return log(val) / log(2.); }
+static inline double Log2(double val) { return log(val) / log(2.); }
typedef struct node_t node_t;
@@ -51,13 +50,13 @@ typedef struct {
int maxlevels;
} skiplist_t;
-PANDAS_INLINE double urand(void) {
+static inline double urand(void) {
return ((double)rand() + 1) / ((double)RAND_MAX + 2);
}
-PANDAS_INLINE int int_min(int a, int b) { return a < b ? a : b; }
+static inline int int_min(int a, int b) { return a < b ? a : b; }
-PANDAS_INLINE node_t *node_init(double value, int levels) {
+static inline node_t *node_init(double value, int levels) {
node_t *result;
result = (node_t *)malloc(sizeof(node_t));
if (result) {
@@ -78,9 +77,9 @@ PANDAS_INLINE node_t *node_init(double value, int levels) {
}
// do this ourselves
-PANDAS_INLINE void node_incref(node_t *node) { ++(node->ref_count); }
+static inline void node_incref(node_t *node) { ++(node->ref_count); }
-PANDAS_INLINE void node_decref(node_t *node) { --(node->ref_count); }
+static inline void node_decref(node_t *node) { --(node->ref_count); }
static void node_destroy(node_t *node) {
int i;
@@ -100,7 +99,7 @@ static void node_destroy(node_t *node) {
}
}
-PANDAS_INLINE void skiplist_destroy(skiplist_t *skp) {
+static inline void skiplist_destroy(skiplist_t *skp) {
if (skp) {
node_destroy(skp->head);
free(skp->tmp_steps);
@@ -109,7 +108,7 @@ PANDAS_INLINE void skiplist_destroy(skiplist_t *skp) {
}
}
-PANDAS_INLINE skiplist_t *skiplist_init(int expected_size) {
+static inline skiplist_t *skiplist_init(int expected_size) {
skiplist_t *result;
node_t *NIL, *head;
int maxlevels, i;
@@ -147,7 +146,7 @@ PANDAS_INLINE skiplist_t *skiplist_init(int expected_size) {
}
// 1 if left < right, 0 if left == right, -1 if left > right
-PANDAS_INLINE int _node_cmp(node_t *node, double value) {
+static inline int _node_cmp(node_t *node, double value) {
if (node->is_nil || node->value > value) {
return -1;
} else if (node->value < value) {
@@ -157,7 +156,7 @@ PANDAS_INLINE int _node_cmp(node_t *node, double value) {
}
}
-PANDAS_INLINE double skiplist_get(skiplist_t *skp, int i, int *ret) {
+static inline double skiplist_get(skiplist_t *skp, int i, int *ret) {
node_t *node;
int level;
@@ -181,7 +180,7 @@ PANDAS_INLINE double skiplist_get(skiplist_t *skp, int i, int *ret) {
// Returns the lowest rank of all elements with value `value`, as opposed to the
// highest rank returned by `skiplist_insert`.
-PANDAS_INLINE int skiplist_min_rank(skiplist_t *skp, double value) {
+static inline int skiplist_min_rank(skiplist_t *skp, double value) {
node_t *node;
int level, rank = 0;
@@ -199,7 +198,7 @@ PANDAS_INLINE int skiplist_min_rank(skiplist_t *skp, double value) {
// Returns the rank of the inserted element. When there are duplicates,
// `rank` is the highest of the group, i.e. the 'max' method of
// https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rank.html
-PANDAS_INLINE int skiplist_insert(skiplist_t *skp, double value) {
+static inline int skiplist_insert(skiplist_t *skp, double value) {
node_t *node, *prevnode, *newnode, *next_at_level;
int *steps_at_level;
int size, steps, level, rank = 0;
@@ -253,7 +252,7 @@ PANDAS_INLINE int skiplist_insert(skiplist_t *skp, double value) {
return rank + 1;
}
-PANDAS_INLINE int skiplist_remove(skiplist_t *skp, double value) {
+static inline int skiplist_remove(skiplist_t *skp, double value) {
int level, size;
node_t *node, *prevnode, *tmpnode, *next_at_level;
node_t **chain;
diff --git a/pandas/_libs/include/pandas/vendored/klib/khash.h b/pandas/_libs/include/pandas/vendored/klib/khash.h
index 31d12a3b30001..f072106e09596 100644
--- a/pandas/_libs/include/pandas/vendored/klib/khash.h
+++ b/pandas/_libs/include/pandas/vendored/klib/khash.h
@@ -85,7 +85,6 @@ int main() {
#define AC_VERSION_KHASH_H "0.2.6"
-#include "pandas/inline_helper.h"
#include <limits.h>
#include <stdlib.h>
#include <string.h>
@@ -153,7 +152,7 @@ typedef khuint_t khiter_t;
// specializations of
// https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp
-khuint32_t PANDAS_INLINE murmur2_32to32(khuint32_t k) {
+static inline khuint32_t murmur2_32to32(khuint32_t k) {
const khuint32_t SEED = 0xc70f6907UL;
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
@@ -186,7 +185,7 @@ khuint32_t PANDAS_INLINE murmur2_32to32(khuint32_t k) {
// - no performance difference could be measured compared to a possible
// x64-version
-khuint32_t PANDAS_INLINE murmur2_32_32to32(khuint32_t k1, khuint32_t k2) {
+static inline khuint32_t murmur2_32_32to32(khuint32_t k1, khuint32_t k2) {
const khuint32_t SEED = 0xc70f6907UL;
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
@@ -220,7 +219,7 @@ khuint32_t PANDAS_INLINE murmur2_32_32to32(khuint32_t k1, khuint32_t k2) {
return h;
}
-khuint32_t PANDAS_INLINE murmur2_64to32(khuint64_t k) {
+static inline khuint32_t murmur2_64to32(khuint64_t k) {
khuint32_t k1 = (khuint32_t)k;
khuint32_t k2 = (khuint32_t)(k >> 32);
@@ -445,7 +444,7 @@ static const double __ac_HASH_UPPER = 0.77;
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, \
__hash_equal) \
- KHASH_INIT2(name, PANDAS_INLINE, khkey_t, khval_t, kh_is_map, __hash_func, \
+ KHASH_INIT2(name, static inline, khkey_t, khval_t, kh_is_map, __hash_func, \
__hash_equal)
/* --- BEGIN OF HASH FUNCTIONS --- */
@@ -465,7 +464,7 @@ static const double __ac_HASH_UPPER = 0.77;
@param key The integer [khuint64_t]
@return The hash value [khuint_t]
*/
-PANDAS_INLINE khuint_t kh_int64_hash_func(khuint64_t key) {
+static inline khuint_t kh_int64_hash_func(khuint64_t key) {
return (khuint_t)((key) >> 33 ^ (key) ^ (key) << 11);
}
/*! @function
@@ -478,7 +477,7 @@ PANDAS_INLINE khuint_t kh_int64_hash_func(khuint64_t key) {
@param s Pointer to a null terminated string
@return The hash value
*/
-PANDAS_INLINE khuint_t __ac_X31_hash_string(const char *s) {
+static inline khuint_t __ac_X31_hash_string(const char *s) {
khuint_t h = *s;
if (h)
for (++s; *s; ++s)
@@ -496,7 +495,7 @@ PANDAS_INLINE khuint_t __ac_X31_hash_string(const char *s) {
*/
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
-PANDAS_INLINE khuint_t __ac_Wang_hash(khuint_t key) {
+static inline khuint_t __ac_Wang_hash(khuint_t key) {
key += ~(key << 15);
key ^= (key >> 10);
key += (key << 3);
diff --git a/pandas/_libs/include/pandas/vendored/klib/khash_python.h b/pandas/_libs/include/pandas/vendored/klib/khash_python.h
index dc16a4ada1716..5a933b45d9e21 100644
--- a/pandas/_libs/include/pandas/vendored/klib/khash_python.h
+++ b/pandas/_libs/include/pandas/vendored/klib/khash_python.h
@@ -83,13 +83,13 @@ void traced_free(void *ptr) {
// implementation of dicts, which shines for smaller sizes but is more
// predisposed to superlinear running times (see GH 36729 for comparison)
-khuint64_t PANDAS_INLINE asuint64(double key) {
+static inline khuint64_t asuint64(double key) {
khuint64_t val;
memcpy(&val, &key, sizeof(double));
return val;
}
-khuint32_t PANDAS_INLINE asuint32(float key) {
+static inline khuint32_t asuint32(float key) {
khuint32_t val;
memcpy(&val, &key, sizeof(float));
return val;
@@ -98,7 +98,7 @@ khuint32_t PANDAS_INLINE asuint32(float key) {
#define ZERO_HASH 0
#define NAN_HASH 0
-khuint32_t PANDAS_INLINE kh_float64_hash_func(double val) {
+static inline khuint32_t kh_float64_hash_func(double val) {
// 0.0 and -0.0 should have the same hash:
if (val == 0.0) {
return ZERO_HASH;
@@ -111,7 +111,7 @@ khuint32_t PANDAS_INLINE kh_float64_hash_func(double val) {
return murmur2_64to32(as_int);
}
-khuint32_t PANDAS_INLINE kh_float32_hash_func(float val) {
+static inline khuint32_t kh_float32_hash_func(float val) {
// 0.0 and -0.0 should have the same hash:
if (val == 0.0f) {
return ZERO_HASH;
@@ -138,10 +138,10 @@ KHASH_MAP_INIT_FLOAT64(float64, size_t)
KHASH_MAP_INIT_FLOAT32(float32, size_t)
-khint32_t PANDAS_INLINE kh_complex128_hash_func(khcomplex128_t val) {
+static inline khint32_t kh_complex128_hash_func(khcomplex128_t val) {
return kh_float64_hash_func(val.real) ^ kh_float64_hash_func(val.imag);
}
-khint32_t PANDAS_INLINE kh_complex64_hash_func(khcomplex64_t val) {
+static inline khint32_t kh_complex64_hash_func(khcomplex64_t val) {
return kh_float32_hash_func(val.real) ^ kh_float32_hash_func(val.imag);
}
@@ -164,7 +164,7 @@ KHASH_MAP_INIT_COMPLEX128(complex128, size_t)
#define kh_exist_complex128(h, k) (kh_exist(h, k))
// NaN-floats should be in the same equivalency class, see GH 22119
-int PANDAS_INLINE floatobject_cmp(PyFloatObject *a, PyFloatObject *b) {
+static inline int floatobject_cmp(PyFloatObject *a, PyFloatObject *b) {
return (Py_IS_NAN(PyFloat_AS_DOUBLE(a)) && Py_IS_NAN(PyFloat_AS_DOUBLE(b))) ||
(PyFloat_AS_DOUBLE(a) == PyFloat_AS_DOUBLE(b));
}
@@ -172,7 +172,7 @@ int PANDAS_INLINE floatobject_cmp(PyFloatObject *a, PyFloatObject *b) {
// NaNs should be in the same equivalency class, see GH 41836
// PyObject_RichCompareBool for complexobjects has a different behavior
// needs to be replaced
-int PANDAS_INLINE complexobject_cmp(PyComplexObject *a, PyComplexObject *b) {
+static inline int complexobject_cmp(PyComplexObject *a, PyComplexObject *b) {
return (Py_IS_NAN(a->cval.real) && Py_IS_NAN(b->cval.real) &&
Py_IS_NAN(a->cval.imag) && Py_IS_NAN(b->cval.imag)) ||
(Py_IS_NAN(a->cval.real) && Py_IS_NAN(b->cval.real) &&
@@ -182,12 +182,12 @@ int PANDAS_INLINE complexobject_cmp(PyComplexObject *a, PyComplexObject *b) {
(a->cval.real == b->cval.real && a->cval.imag == b->cval.imag);
}
-int PANDAS_INLINE pyobject_cmp(PyObject *a, PyObject *b);
+static inline int pyobject_cmp(PyObject *a, PyObject *b);
// replacing PyObject_RichCompareBool (NaN!=NaN) with pyobject_cmp (NaN==NaN),
// which treats NaNs as equivalent
// see GH 41836
-int PANDAS_INLINE tupleobject_cmp(PyTupleObject *a, PyTupleObject *b) {
+static inline int tupleobject_cmp(PyTupleObject *a, PyTupleObject *b) {
Py_ssize_t i;
if (Py_SIZE(a) != Py_SIZE(b)) {
@@ -202,7 +202,7 @@ int PANDAS_INLINE tupleobject_cmp(PyTupleObject *a, PyTupleObject *b) {
return 1;
}
-int PANDAS_INLINE pyobject_cmp(PyObject *a, PyObject *b) {
+static inline int pyobject_cmp(PyObject *a, PyObject *b) {
if (a == b) {
return 1;
}
@@ -230,7 +230,7 @@ int PANDAS_INLINE pyobject_cmp(PyObject *a, PyObject *b) {
return result;
}
-Py_hash_t PANDAS_INLINE _Pandas_HashDouble(double val) {
+static inline Py_hash_t _Pandas_HashDouble(double val) {
// Since Python3.10, nan is no longer has hash 0
if (Py_IS_NAN(val)) {
return 0;
@@ -242,14 +242,14 @@ Py_hash_t PANDAS_INLINE _Pandas_HashDouble(double val) {
#endif
}
-Py_hash_t PANDAS_INLINE floatobject_hash(PyFloatObject *key) {
+static inline Py_hash_t floatobject_hash(PyFloatObject *key) {
return _Pandas_HashDouble(PyFloat_AS_DOUBLE(key));
}
#define _PandasHASH_IMAG 1000003UL
// replaces _Py_HashDouble with _Pandas_HashDouble
-Py_hash_t PANDAS_INLINE complexobject_hash(PyComplexObject *key) {
+static inline Py_hash_t complexobject_hash(PyComplexObject *key) {
Py_uhash_t realhash = (Py_uhash_t)_Pandas_HashDouble(key->cval.real);
Py_uhash_t imaghash = (Py_uhash_t)_Pandas_HashDouble(key->cval.imag);
if (realhash == (Py_uhash_t)-1 || imaghash == (Py_uhash_t)-1) {
@@ -262,7 +262,7 @@ Py_hash_t PANDAS_INLINE complexobject_hash(PyComplexObject *key) {
return (Py_hash_t)combined;
}
-khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject *key);
+static inline khuint32_t kh_python_hash_func(PyObject *key);
// we could use any hashing algorithm, this is the original CPython's for tuples
@@ -280,7 +280,7 @@ khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject *key);
((x << 13) | (x >> 19)) /* Rotate left 13 bits */
#endif
-Py_hash_t PANDAS_INLINE tupleobject_hash(PyTupleObject *key) {
+static inline Py_hash_t tupleobject_hash(PyTupleObject *key) {
Py_ssize_t i, len = Py_SIZE(key);
PyObject **item = key->ob_item;
@@ -304,7 +304,7 @@ Py_hash_t PANDAS_INLINE tupleobject_hash(PyTupleObject *key) {
return acc;
}
-khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject *key) {
+static inline khuint32_t kh_python_hash_func(PyObject *key) {
Py_hash_t hash;
// For PyObject_Hash holds:
// hash(0.0) == 0 == hash(-0.0)
@@ -373,14 +373,14 @@ typedef struct {
typedef kh_str_starts_t *p_kh_str_starts_t;
-p_kh_str_starts_t PANDAS_INLINE kh_init_str_starts(void) {
+static inline p_kh_str_starts_t kh_init_str_starts(void) {
kh_str_starts_t *result =
(kh_str_starts_t *)KHASH_CALLOC(1, sizeof(kh_str_starts_t));
result->table = kh_init_str();
return result;
}
-khuint_t PANDAS_INLINE kh_put_str_starts_item(kh_str_starts_t *table, char *key,
+static inline khuint_t kh_put_str_starts_item(kh_str_starts_t *table, char *key,
int *ret) {
khuint_t result = kh_put_str(table->table, key, ret);
if (*ret != 0) {
@@ -389,7 +389,7 @@ khuint_t PANDAS_INLINE kh_put_str_starts_item(kh_str_starts_t *table, char *key,
return result;
}
-khuint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t *table,
+static inline khuint_t kh_get_str_starts_item(const kh_str_starts_t *table,
const char *key) {
unsigned char ch = *key;
if (table->starts[ch]) {
@@ -399,18 +399,18 @@ khuint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t *table,
return 0;
}
-void PANDAS_INLINE kh_destroy_str_starts(kh_str_starts_t *table) {
+static inline void kh_destroy_str_starts(kh_str_starts_t *table) {
kh_destroy_str(table->table);
KHASH_FREE(table);
}
-void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t *table, khuint_t val) {
+static inline void kh_resize_str_starts(kh_str_starts_t *table, khuint_t val) {
kh_resize_str(table->table, val);
}
// utility function: given the number of elements
// returns number of necessary buckets
-khuint_t PANDAS_INLINE kh_needed_n_buckets(khuint_t n_elements) {
+static inline khuint_t kh_needed_n_buckets(khuint_t n_elements) {
khuint_t candidate = n_elements;
kroundup32(candidate);
khuint_t upper_bound = (khuint_t)(candidate * __ac_HASH_UPPER + 0.5);
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index c9466c485ae94..f3d976841808c 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -343,7 +343,7 @@ static int push_char(parser_t *self, char c) {
return 0;
}
-int PANDAS_INLINE end_field(parser_t *self) {
+static inline int end_field(parser_t *self) {
// XXX cruft
if (self->words_len >= self->words_cap) {
TRACE(("end_field: ERROR!!! self->words_len(%zu) >= "
| `inline` is a part of C99, so we don't need to define a helper for it anymore (we now require a C11 compiler) | https://api.github.com/repos/pandas-dev/pandas/pulls/56275 | 2023-12-01T02:56:43Z | 2023-12-01T18:35:36Z | 2023-12-01T18:35:36Z | 2023-12-01T18:35:43Z |
Clean up tokenizer / parser files | diff --git a/pandas/_libs/include/pandas/parser/io.h b/pandas/_libs/include/pandas/parser/io.h
index cbe6bc04b7663..c707c23b567d2 100644
--- a/pandas/_libs/include/pandas/parser/io.h
+++ b/pandas/_libs/include/pandas/parser/io.h
@@ -25,7 +25,7 @@ typedef struct _rd_source {
void *new_rd_source(PyObject *obj);
-int del_rd_source(void *src);
+void del_rd_source(void *src);
-void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
+char *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
int *status, const char *encoding_errors);
diff --git a/pandas/_libs/include/pandas/parser/pd_parser.h b/pandas/_libs/include/pandas/parser/pd_parser.h
index 61f15dcef8d27..58a09ae1bba39 100644
--- a/pandas/_libs/include/pandas/parser/pd_parser.h
+++ b/pandas/_libs/include/pandas/parser/pd_parser.h
@@ -20,8 +20,8 @@ typedef struct {
int (*to_double)(char *, double *, char, char, int *);
int (*floatify)(PyObject *, double *, int *);
void *(*new_rd_source)(PyObject *);
- int (*del_rd_source)(void *);
- void *(*buffer_rd_bytes)(void *, size_t, size_t *, int *, const char *);
+ void (*del_rd_source)(void *);
+ char *(*buffer_rd_bytes)(void *, size_t, size_t *, int *, const char *);
void (*uint_state_init)(uint_state *);
int (*uint64_conflict)(uint_state *);
void (*coliter_setup)(coliter_t *, parser_t *, int64_t, int64_t);
@@ -30,7 +30,7 @@ typedef struct {
void (*parser_free)(parser_t *);
void (*parser_del)(parser_t *);
int (*parser_add_skiprow)(parser_t *, int64_t);
- int (*parser_set_skipfirstnrows)(parser_t *, int64_t);
+ void (*parser_set_skipfirstnrows)(parser_t *, int64_t);
void (*parser_set_default_options)(parser_t *);
int (*parser_consume_rows)(parser_t *, size_t);
int (*parser_trim_buffers)(parser_t *);
diff --git a/pandas/_libs/include/pandas/parser/tokenizer.h b/pandas/_libs/include/pandas/parser/tokenizer.h
index 6a46ad637a401..ade783e3716de 100644
--- a/pandas/_libs/include/pandas/parser/tokenizer.h
+++ b/pandas/_libs/include/pandas/parser/tokenizer.h
@@ -84,9 +84,9 @@ typedef enum {
typedef enum { ERROR, WARN, SKIP } BadLineHandleMethod;
-typedef void *(*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
+typedef char *(*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
int *status, const char *encoding_errors);
-typedef int (*io_cleanup)(void *src);
+typedef void (*io_cleanup)(void *src);
typedef struct parser_t {
void *source;
@@ -187,7 +187,7 @@ int parser_trim_buffers(parser_t *self);
int parser_add_skiprow(parser_t *self, int64_t row);
-int parser_set_skipfirstnrows(parser_t *self, int64_t nrows);
+void parser_set_skipfirstnrows(parser_t *self, int64_t nrows);
void parser_free(parser_t *self);
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index ab28b34be58f2..204b242d9eb73 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -152,9 +152,9 @@ cdef extern from "pandas/parser/tokenizer.h":
WARN,
SKIP
- ctypedef void* (*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
+ ctypedef char* (*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
int *status, const char *encoding_errors)
- ctypedef int (*io_cleanup)(void *src)
+ ctypedef void (*io_cleanup)(void *src)
ctypedef struct parser_t:
void *source
@@ -247,9 +247,9 @@ cdef extern from "pandas/parser/tokenizer.h":
cdef extern from "pandas/parser/pd_parser.h":
void *new_rd_source(object obj) except NULL
- int del_rd_source(void *src)
+ void del_rd_source(void *src)
- void* buffer_rd_bytes(void *source, size_t nbytes,
+ char* buffer_rd_bytes(void *source, size_t nbytes,
size_t *bytes_read, int *status, const char *encoding_errors)
void uint_state_init(uint_state *self)
@@ -266,7 +266,7 @@ cdef extern from "pandas/parser/pd_parser.h":
void parser_del(parser_t *self) nogil
int parser_add_skiprow(parser_t *self, int64_t row)
- int parser_set_skipfirstnrows(parser_t *self, int64_t nrows)
+ void parser_set_skipfirstnrows(parser_t *self, int64_t nrows)
void parser_set_default_options(parser_t *self)
@@ -318,13 +318,13 @@ cdef double round_trip_wrapper(const char *p, char **q, char decimal,
return round_trip(p, q, decimal, sci, tsep, skip_trailing, error, maybe_int)
-cdef void* buffer_rd_bytes_wrapper(void *source, size_t nbytes,
+cdef char* buffer_rd_bytes_wrapper(void *source, size_t nbytes,
size_t *bytes_read, int *status,
const char *encoding_errors) noexcept:
return buffer_rd_bytes(source, nbytes, bytes_read, status, encoding_errors)
-cdef int del_rd_source_wrapper(void *src) noexcept:
- return del_rd_source(src)
+cdef void del_rd_source_wrapper(void *src) noexcept:
+ del_rd_source(src)
cdef class TextReader:
diff --git a/pandas/_libs/src/parser/io.c b/pandas/_libs/src/parser/io.c
index 29c2c8d095907..851901481d222 100644
--- a/pandas/_libs/src/parser/io.c
+++ b/pandas/_libs/src/parser/io.c
@@ -35,12 +35,10 @@ void *new_rd_source(PyObject *obj) {
*/
-int del_rd_source(void *rds) {
+void del_rd_source(void *rds) {
Py_XDECREF(RDS(rds)->obj);
Py_XDECREF(RDS(rds)->buffer);
free(rds);
-
- return 0;
}
/*
@@ -49,26 +47,20 @@ int del_rd_source(void *rds) {
*/
-void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
+char *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
int *status, const char *encoding_errors) {
- PyGILState_STATE state;
- PyObject *result, *func, *args, *tmp;
-
- void *retval;
-
- size_t length;
rd_source *src = RDS(source);
- state = PyGILState_Ensure();
+ PyGILState_STATE state = PyGILState_Ensure();
/* delete old object */
Py_XDECREF(src->buffer);
src->buffer = NULL;
- args = Py_BuildValue("(i)", nbytes);
+ PyObject *args = Py_BuildValue("(i)", nbytes);
- func = PyObject_GetAttrString(src->obj, "read");
+ PyObject *func = PyObject_GetAttrString(src->obj, "read");
/* Note: PyObject_CallObject requires the GIL */
- result = PyObject_CallObject(func, args);
+ PyObject *result = PyObject_CallObject(func, args);
Py_XDECREF(args);
Py_XDECREF(func);
@@ -78,7 +70,7 @@ void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
*status = CALLING_READ_FAILED;
return NULL;
} else if (!PyBytes_Check(result)) {
- tmp = PyUnicode_AsEncodedString(result, "utf-8", encoding_errors);
+ PyObject *tmp = PyUnicode_AsEncodedString(result, "utf-8", encoding_errors);
Py_DECREF(result);
if (tmp == NULL) {
PyGILState_Release(state);
@@ -87,7 +79,7 @@ void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
result = tmp;
}
- length = PySequence_Length(result);
+ const size_t length = PySequence_Length(result);
if (length == 0)
*status = REACHED_EOF;
@@ -96,7 +88,7 @@ void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
/* hang on to the Python object */
src->buffer = result;
- retval = (void *)PyBytes_AsString(result);
+ char *retval = PyBytes_AsString(result);
PyGILState_Release(state);
diff --git a/pandas/_libs/src/parser/pd_parser.c b/pandas/_libs/src/parser/pd_parser.c
index 41689704ccffc..88b6603c3c6f9 100644
--- a/pandas/_libs/src/parser/pd_parser.c
+++ b/pandas/_libs/src/parser/pd_parser.c
@@ -24,7 +24,6 @@ static int to_double(char *item, double *p_value, char sci, char decimal,
}
static int floatify(PyObject *str, double *result, int *maybe_int) {
- int status;
char *data;
PyObject *tmp = NULL;
const char sci = 'E';
@@ -43,7 +42,7 @@ static int floatify(PyObject *str, double *result, int *maybe_int) {
return -1;
}
- status = to_double(data, result, sci, dec, maybe_int);
+ const int status = to_double(data, result, sci, dec, maybe_int);
if (!status) {
/* handle inf/-inf infinity/-infinity */
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index c9466c485ae94..efe448b034806 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -22,6 +22,7 @@ GitHub. See Python Software Foundation License and BSD licenses for these.
#include <ctype.h>
#include <float.h>
#include <math.h>
+#include <stdbool.h>
#include "pandas/portable.h"
@@ -107,18 +108,7 @@ void parser_set_default_options(parser_t *self) {
parser_t *parser_new(void) { return (parser_t *)calloc(1, sizeof(parser_t)); }
-int parser_clear_data_buffers(parser_t *self) {
- free_if_not_null((void *)&self->stream);
- free_if_not_null((void *)&self->words);
- free_if_not_null((void *)&self->word_starts);
- free_if_not_null((void *)&self->line_start);
- free_if_not_null((void *)&self->line_fields);
- return 0;
-}
-
-int parser_cleanup(parser_t *self) {
- int status = 0;
-
+static void parser_cleanup(parser_t *self) {
// XXX where to put this
free_if_not_null((void *)&self->error_msg);
free_if_not_null((void *)&self->warn_msg);
@@ -128,23 +118,13 @@ int parser_cleanup(parser_t *self) {
self->skipset = NULL;
}
- if (parser_clear_data_buffers(self) < 0) {
- status = -1;
- }
-
if (self->cb_cleanup != NULL) {
- if (self->cb_cleanup(self->source) < 0) {
- status = -1;
- }
+ self->cb_cleanup(self->source);
self->cb_cleanup = NULL;
}
-
- return status;
}
int parser_init(parser_t *self) {
- int64_t sz;
-
/*
Initialize data buffers
*/
@@ -167,8 +147,9 @@ int parser_init(parser_t *self) {
self->stream_len = 0;
// word pointers and metadata
- sz = STREAM_INIT_SIZE / 10;
- sz = sz ? sz : 1;
+ _Static_assert(STREAM_INIT_SIZE / 10 > 0,
+ "STREAM_INIT_SIZE must be defined and >= 10");
+ const int64_t sz = STREAM_INIT_SIZE / 10;
self->words = malloc(sz * sizeof(char *));
self->word_starts = malloc(sz * sizeof(int64_t));
self->max_words_cap = sz;
@@ -220,17 +201,14 @@ void parser_free(parser_t *self) {
void parser_del(parser_t *self) { free(self); }
static int make_stream_space(parser_t *self, size_t nbytes) {
- uint64_t i, cap, length;
- int status;
- void *orig_ptr, *newptr;
-
// Can we fit potentially nbytes tokens (+ null terminators) in the stream?
/*
TOKEN STREAM
*/
- orig_ptr = (void *)self->stream;
+ int status;
+ char *orig_ptr = (void *)self->stream;
TRACE(("\n\nmake_stream_space: nbytes = %zu. grow_buffer(self->stream...)\n",
nbytes))
self->stream =
@@ -248,7 +226,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
if (self->stream != orig_ptr) {
self->pword_start = self->stream + self->word_start;
- for (i = 0; i < self->words_len; ++i) {
+ for (uint64_t i = 0; i < self->words_len; ++i) {
self->words[i] = self->stream + self->word_starts[i];
}
}
@@ -257,7 +235,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
WORD VECTORS
*/
- cap = self->words_cap;
+ const uint64_t words_cap = self->words_cap;
/**
* If we are reading in chunks, we need to be aware of the maximum number
@@ -267,11 +245,9 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
* Otherwise, we risk a buffer overflow if we mistakenly under-allocate
* just because a recent chunk did not have as many words.
*/
- if (self->words_len + nbytes < self->max_words_cap) {
- length = self->max_words_cap - nbytes - 1;
- } else {
- length = self->words_len;
- }
+ const uint64_t length = self->words_len + nbytes < self->max_words_cap
+ ? self->max_words_cap - nbytes - 1
+ : self->words_len;
self->words =
(char **)grow_buffer((void *)self->words, length, &self->words_cap,
@@ -284,23 +260,23 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
}
// realloc took place
- if (cap != self->words_cap) {
+ if (words_cap != self->words_cap) {
TRACE(("make_stream_space: cap != self->words_cap, nbytes = %d, "
"self->words_cap=%d\n",
nbytes, self->words_cap))
- newptr =
- realloc((void *)self->word_starts, sizeof(int64_t) * self->words_cap);
+ int64_t *newptr = (int64_t *)realloc(self->word_starts,
+ sizeof(int64_t) * self->words_cap);
if (newptr == NULL) {
return PARSER_OUT_OF_MEMORY;
} else {
- self->word_starts = (int64_t *)newptr;
+ self->word_starts = newptr;
}
}
/*
LINE VECTORS
*/
- cap = self->lines_cap;
+ const uint64_t lines_cap = self->lines_cap;
self->line_start = (int64_t *)grow_buffer((void *)self->line_start,
self->lines + 1, &self->lines_cap,
nbytes, sizeof(int64_t), &status);
@@ -312,14 +288,14 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
}
// realloc took place
- if (cap != self->lines_cap) {
+ if (lines_cap != self->lines_cap) {
TRACE(("make_stream_space: cap != self->lines_cap, nbytes = %d\n", nbytes))
- newptr =
- realloc((void *)self->line_fields, sizeof(int64_t) * self->lines_cap);
+ int64_t *newptr = (int64_t *)realloc(self->line_fields,
+ sizeof(int64_t) * self->lines_cap);
if (newptr == NULL) {
return PARSER_OUT_OF_MEMORY;
} else {
- self->line_fields = (int64_t *)newptr;
+ self->line_fields = newptr;
}
}
@@ -333,7 +309,7 @@ static int push_char(parser_t *self, char c) {
TRACE(("push_char: ERROR!!! self->stream_len(%d) >= "
"self->stream_cap(%d)\n",
self->stream_len, self->stream_cap))
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - possible malformed input file.\n");
@@ -349,7 +325,7 @@ int PANDAS_INLINE end_field(parser_t *self) {
TRACE(("end_field: ERROR!!! self->words_len(%zu) >= "
"self->words_cap(%zu)\n",
self->words_len, self->words_cap))
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - possible malformed input file.\n");
@@ -381,30 +357,24 @@ int PANDAS_INLINE end_field(parser_t *self) {
}
static void append_warning(parser_t *self, const char *msg) {
- int64_t ex_length;
- int64_t length = strlen(msg);
- void *newptr;
+ const int64_t length = strlen(msg);
if (self->warn_msg == NULL) {
self->warn_msg = malloc(length + 1);
snprintf(self->warn_msg, length + 1, "%s", msg);
} else {
- ex_length = strlen(self->warn_msg);
- newptr = realloc(self->warn_msg, ex_length + length + 1);
+ const int64_t ex_length = strlen(self->warn_msg);
+ char *newptr = (char *)realloc(self->warn_msg, ex_length + length + 1);
if (newptr != NULL) {
- self->warn_msg = (char *)newptr;
+ self->warn_msg = newptr;
snprintf(self->warn_msg + ex_length, length + 1, "%s", msg);
}
}
}
static int end_line(parser_t *self) {
- char *msg;
- int64_t fields;
int64_t ex_fields = self->expected_fields;
- int64_t bufsize = 100; // for error or warning messages
-
- fields = self->line_fields[self->lines];
+ int64_t fields = self->line_fields[self->lines];
TRACE(("end_line: Line end, nfields: %d\n", fields));
@@ -447,6 +417,7 @@ static int end_line(parser_t *self) {
// file_lines is now the actual file line number (starting at 1)
if (self->on_bad_lines == ERROR) {
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Expected %" PRId64 " fields in line %" PRIu64 ", saw %" PRId64
@@ -460,7 +431,8 @@ static int end_line(parser_t *self) {
// simply skip bad lines
if (self->on_bad_lines == WARN) {
// pass up error message
- msg = malloc(bufsize);
+ const size_t bufsize = 100;
+ char *msg = (char *)malloc(bufsize);
snprintf(msg, bufsize,
"Skipping line %" PRIu64 ": expected %" PRId64
" fields, saw %" PRId64 "\n",
@@ -474,7 +446,7 @@ static int end_line(parser_t *self) {
if ((self->lines >= self->header_end + 1) && fields < ex_fields) {
// might overrun the buffer when closing fields
if (make_stream_space(self, ex_fields - fields) < 0) {
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize, "out of memory");
return -1;
@@ -494,7 +466,7 @@ static int end_line(parser_t *self) {
if (self->lines >= self->lines_cap) {
TRACE(("end_line: ERROR!!! self->lines(%zu) >= self->lines_cap(%zu)\n",
self->lines, self->lines_cap))
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - "
@@ -532,13 +504,11 @@ int parser_add_skiprow(parser_t *self, int64_t row) {
return 0;
}
-int parser_set_skipfirstnrows(parser_t *self, int64_t nrows) {
+void parser_set_skipfirstnrows(parser_t *self, int64_t nrows) {
// self->file_lines is zero based so subtract 1 from nrows
if (nrows > 0) {
self->skip_first_N_rows = nrows - 1;
}
-
- return 0;
}
static int parser_buffer_bytes(parser_t *self, size_t nbytes,
@@ -556,7 +526,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes,
self->datalen = bytes_read;
if (status != REACHED_EOF && self->data == NULL) {
- int64_t bufsize = 200;
+ const size_t bufsize = 200;
self->error_msg = malloc(bufsize);
if (status == CALLING_READ_FAILED) {
@@ -586,7 +556,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes,
if (slen >= self->stream_cap) { \
TRACE(("PUSH_CHAR: ERROR!!! slen(%d) >= stream_cap(%d)\n", slen, \
self->stream_cap)) \
- int64_t bufsize = 100; \
+ const size_t bufsize = 100; \
self->error_msg = malloc(bufsize); \
snprintf(self->error_msg, bufsize, \
"Buffer overflow caught - possible malformed input file.\n"); \
@@ -664,22 +634,14 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes,
self->datapos += 3; \
}
-int skip_this_line(parser_t *self, int64_t rownum) {
- int should_skip;
- PyObject *result;
- PyGILState_STATE state;
-
+static int skip_this_line(parser_t *self, int64_t rownum) {
if (self->skipfunc != NULL) {
- state = PyGILState_Ensure();
- result = PyObject_CallFunction(self->skipfunc, "i", rownum);
+ PyGILState_STATE state = PyGILState_Ensure();
+ PyObject *result = PyObject_CallFunction(self->skipfunc, "i", rownum);
// Error occurred. It will be processed
// and caught at the Cython level.
- if (result == NULL) {
- should_skip = -1;
- } else {
- should_skip = PyObject_IsTrue(result);
- }
+ const int should_skip = result == NULL ? -1 : PyObject_IsTrue(result);
Py_XDECREF(result);
PyGILState_Release(state);
@@ -693,12 +655,8 @@ int skip_this_line(parser_t *self, int64_t rownum) {
}
}
-int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
- int64_t i;
- uint64_t slen;
- int should_skip;
- char c;
- char *stream;
+static int tokenize_bytes(parser_t *self, size_t line_limit,
+ uint64_t start_lines) {
char *buf = self->data + self->datapos;
const char lineterminator =
@@ -716,14 +674,14 @@ int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
(self->escapechar != '\0') ? self->escapechar : 1000;
if (make_stream_space(self, self->datalen - self->datapos) < 0) {
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize, "out of memory");
return -1;
}
- stream = self->stream + self->stream_len;
- slen = self->stream_len;
+ char *stream = self->stream + self->stream_len;
+ uint64_t slen = self->stream_len;
TRACE(("%s\n", buf));
@@ -731,6 +689,8 @@ int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
CHECK_FOR_BOM();
}
+ char c;
+ int64_t i;
for (i = self->datapos; i < self->datalen; ++i) {
// next character in file
c = *buf++;
@@ -840,9 +800,9 @@ int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
break;
}
- case START_RECORD:
+ case START_RECORD: {
// start of record
- should_skip = skip_this_line(self, self->file_lines);
+ const int should_skip = skip_this_line(self, self->file_lines);
if (should_skip == -1) {
goto parsingerror;
@@ -894,7 +854,7 @@ int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
// normal character - fall through
// to handle as START_FIELD
self->state = START_FIELD;
-
+ }
case START_FIELD:
// expecting field
if (IS_TERMINATOR(c)) {
@@ -1111,7 +1071,7 @@ int tokenize_bytes(parser_t *self, size_t line_limit, uint64_t start_lines) {
}
static int parser_handle_eof(parser_t *self) {
- int64_t bufsize = 100;
+ const size_t bufsize = 100;
TRACE(("handling eof, datalen: %d, pstate: %d\n", self->datalen, self->state))
@@ -1155,9 +1115,6 @@ static int parser_handle_eof(parser_t *self) {
}
int parser_consume_rows(parser_t *self, size_t nrows) {
- int64_t offset, word_deletions;
- uint64_t char_count, i;
-
if (nrows > self->lines) {
nrows = self->lines;
}
@@ -1167,15 +1124,15 @@ int parser_consume_rows(parser_t *self, size_t nrows) {
return 0;
/* cannot guarantee that nrows + 1 has been observed */
- word_deletions = self->line_start[nrows - 1] + self->line_fields[nrows - 1];
- if (word_deletions >= 1) {
- char_count = (self->word_starts[word_deletions - 1] +
- strlen(self->words[word_deletions - 1]) + 1);
- } else {
- /* if word_deletions == 0 (i.e. this case) then char_count must
- * be 0 too, as no data needs to be skipped */
- char_count = 0;
- }
+ const int64_t word_deletions =
+ self->line_start[nrows - 1] + self->line_fields[nrows - 1];
+
+ /* if word_deletions == 0 (i.e. this case) then char_count must
+ * be 0 too, as no data needs to be skipped */
+ const int64_t char_count = word_deletions >= 1
+ ? (self->word_starts[word_deletions - 1] +
+ strlen(self->words[word_deletions - 1]) + 1)
+ : 0;
TRACE(("parser_consume_rows: Deleting %d words, %d chars\n", word_deletions,
char_count));
@@ -1191,7 +1148,8 @@ int parser_consume_rows(parser_t *self, size_t nrows) {
/* move token metadata */
// Note: We should always have words_len < word_deletions, so this
// subtraction will remain appropriately-typed.
- for (i = 0; i < self->words_len - word_deletions; ++i) {
+ int64_t offset;
+ for (uint64_t i = 0; i < self->words_len - word_deletions; ++i) {
offset = i + word_deletions;
self->words[i] = self->words[offset] - char_count;
@@ -1206,7 +1164,7 @@ int parser_consume_rows(parser_t *self, size_t nrows) {
/* move line metadata */
// Note: We should always have self->lines - nrows + 1 >= 0, so this
// subtraction will remain appropriately-typed.
- for (i = 0; i < self->lines - nrows + 1; ++i) {
+ for (uint64_t i = 0; i < self->lines - nrows + 1; ++i) {
offset = i + nrows;
self->line_start[i] = self->line_start[offset] - word_deletions;
self->line_fields[i] = self->line_fields[offset];
@@ -1227,10 +1185,6 @@ int parser_trim_buffers(parser_t *self) {
/*
Free memory
*/
- size_t new_cap;
- void *newptr;
-
- uint64_t i;
/**
* Before we free up space and trim, we should
@@ -1246,7 +1200,7 @@ int parser_trim_buffers(parser_t *self) {
}
/* trim words, word_starts */
- new_cap = _next_pow2(self->words_len) + 1;
+ size_t new_cap = _next_pow2(self->words_len) + 1;
if (new_cap < self->words_cap) {
TRACE(("parser_trim_buffers: new_cap < self->words_cap\n"));
self->words = realloc(self->words, new_cap * sizeof(char *));
@@ -1268,7 +1222,7 @@ int parser_trim_buffers(parser_t *self) {
if (new_cap < self->stream_cap) {
TRACE(("parser_trim_buffers: new_cap < self->stream_cap, calling "
"realloc\n"));
- newptr = realloc(self->stream, new_cap);
+ void *newptr = realloc(self->stream, new_cap);
if (newptr == NULL) {
return PARSER_OUT_OF_MEMORY;
} else {
@@ -1280,7 +1234,7 @@ int parser_trim_buffers(parser_t *self) {
if (self->stream != newptr) {
self->pword_start = (char *)newptr + self->word_start;
- for (i = 0; i < self->words_len; ++i) {
+ for (uint64_t i = 0; i < self->words_len; ++i) {
self->words[i] = (char *)newptr + self->word_starts[i];
}
}
@@ -1294,7 +1248,7 @@ int parser_trim_buffers(parser_t *self) {
new_cap = _next_pow2(self->lines) + 1;
if (new_cap < self->lines_cap) {
TRACE(("parser_trim_buffers: new_cap < self->lines_cap\n"));
- newptr = realloc(self->line_start, new_cap * sizeof(int64_t));
+ void *newptr = realloc(self->line_start, new_cap * sizeof(int64_t));
if (newptr == NULL) {
return PARSER_OUT_OF_MEMORY;
} else {
@@ -1317,10 +1271,10 @@ int parser_trim_buffers(parser_t *self) {
all : tokenize all the data vs. certain number of rows
*/
-int _tokenize_helper(parser_t *self, size_t nrows, int all,
- const char *encoding_errors) {
+static int _tokenize_helper(parser_t *self, size_t nrows, int all,
+ const char *encoding_errors) {
int status = 0;
- uint64_t start_lines = self->lines;
+ const uint64_t start_lines = self->lines;
if (self->state == FINISHED) {
return 0;
@@ -1367,13 +1321,11 @@ int _tokenize_helper(parser_t *self, size_t nrows, int all,
}
int tokenize_nrows(parser_t *self, size_t nrows, const char *encoding_errors) {
- int status = _tokenize_helper(self, nrows, 0, encoding_errors);
- return status;
+ return _tokenize_helper(self, nrows, 0, encoding_errors);
}
int tokenize_all_rows(parser_t *self, const char *encoding_errors) {
- int status = _tokenize_helper(self, -1, 1, encoding_errors);
- return status;
+ return _tokenize_helper(self, -1, 1, encoding_errors);
}
/*
@@ -1449,22 +1401,9 @@ int to_boolean(const char *item, uint8_t *val) {
// * Add tsep argument for thousands separator
//
-// pessimistic but quick assessment,
-// assuming that each decimal digit requires 4 bits to store
-const int max_int_decimal_digits = (sizeof(unsigned int) * 8) / 4;
-
double xstrtod(const char *str, char **endptr, char decimal, char sci,
char tsep, int skip_trailing, int *error, int *maybe_int) {
- double number;
- unsigned int i_number = 0;
- int exponent;
- int negative;
- char *p = (char *)str;
- double p10;
- int n;
- int num_digits;
- int num_decimals;
-
+ const char *p = str;
if (maybe_int != NULL)
*maybe_int = 1;
// Skip leading whitespace.
@@ -1472,7 +1411,7 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
p++;
// Handle optional sign.
- negative = 0;
+ int negative = 0;
switch (*p) {
case '-':
negative = 1; // Fall through to increment position.
@@ -1480,11 +1419,17 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
p++;
}
- exponent = 0;
- num_digits = 0;
- num_decimals = 0;
+ int exponent = 0;
+ int num_digits = 0;
+ int num_decimals = 0;
+
+ // pessimistic but quick assessment,
+ // assuming that each decimal digit requires 4 bits to store
+ // TODO: C23 has UINT64_WIDTH macro that can be used at compile time
+ const int max_int_decimal_digits = (sizeof(unsigned int) * 8) / 4;
// Process string of digits.
+ unsigned int i_number = 0;
while (isdigit_ascii(*p) && num_digits <= max_int_decimal_digits) {
i_number = i_number * 10 + (*p - '0');
p++;
@@ -1492,7 +1437,7 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
p += (tsep != '\0' && *p == tsep);
}
- number = i_number;
+ double number = i_number;
if (num_digits > max_int_decimal_digits) {
// process what's left as double
@@ -1546,7 +1491,7 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
// Process string of digits.
num_digits = 0;
- n = 0;
+ int n = 0;
while (isdigit_ascii(*p)) {
n = n * 10 + (*p - '0');
num_digits++;
@@ -1569,8 +1514,8 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
}
// Scale the result.
- p10 = 10.;
- n = exponent;
+ double p10 = 10.;
+ int n = exponent;
if (n < 0)
n = -n;
while (n) {
@@ -1595,21 +1540,15 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
}
if (endptr)
- *endptr = p;
+ *endptr = (char *)p;
return number;
}
double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
char tsep, int skip_trailing, int *error,
int *maybe_int) {
- double number;
- int exponent;
- int negative;
- char *p = (char *)str;
- int num_digits;
- int num_decimals;
- int max_digits = 17;
- int n;
+ const char *p = str;
+ const int max_digits = 17;
if (maybe_int != NULL)
*maybe_int = 1;
@@ -1652,7 +1591,7 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
p++;
// Handle optional sign.
- negative = 0;
+ int negative = 0;
switch (*p) {
case '-':
negative = 1; // Fall through to increment position.
@@ -1660,10 +1599,10 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
p++;
}
- number = 0.;
- exponent = 0;
- num_digits = 0;
- num_decimals = 0;
+ double number = 0.;
+ int exponent = 0;
+ int num_digits = 0;
+ int num_decimals = 0;
// Process string of digits.
while (isdigit_ascii(*p)) {
@@ -1723,7 +1662,7 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
// Process string of digits.
num_digits = 0;
- n = 0;
+ int n = 0;
while (num_digits < max_digits && isdigit_ascii(*p)) {
n = n * 10 + (*p - '0');
num_digits++;
@@ -1767,7 +1706,7 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
}
if (endptr)
- *endptr = p;
+ *endptr = (char *)p;
return number;
}
@@ -1777,10 +1716,10 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
with a call to `free`.
*/
-char *_str_copy_decimal_str_c(const char *s, char **endpos, char decimal,
- char tsep) {
+static char *_str_copy_decimal_str_c(const char *s, char **endpos, char decimal,
+ char tsep) {
const char *p = s;
- size_t length = strlen(s);
+ const size_t length = strlen(s);
char *s_copy = malloc(length + 1);
char *dst = s_copy;
// Skip leading whitespace.
@@ -1830,10 +1769,9 @@ double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
char *pc = _str_copy_decimal_str_c(p, &endptr, decimal, tsep);
// This is called from a nogil block in parsers.pyx
// so need to explicitly get GIL before Python calls
- PyGILState_STATE gstate;
- gstate = PyGILState_Ensure();
+ PyGILState_STATE gstate = PyGILState_Ensure();
char *endpc;
- double r = PyOS_string_to_double(pc, &endpc, 0);
+ const double r = PyOS_string_to_double(pc, &endpc, 0);
// PyOS_string_to_double needs to consume the whole string
if (endpc == pc + strlen(pc)) {
if (q != NULL) {
@@ -1882,20 +1820,15 @@ int uint64_conflict(uint_state *self) {
int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
int *error, char tsep) {
const char *p = p_item;
- int isneg = 0;
- int64_t number = 0;
- int d;
-
// Skip leading spaces.
while (isspace_ascii(*p)) {
++p;
}
// Handle sign.
- if (*p == '-') {
- isneg = 1;
- ++p;
- } else if (*p == '+') {
+ const bool isneg = *p == '-' ? true : false;
+ // Handle sign.
+ if (isneg || (*p == '+')) {
p++;
}
@@ -1906,6 +1839,7 @@ int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
return 0;
}
+ int64_t number = 0;
if (isneg) {
// If number is greater than pre_min, at least one more digit
// can be processed without overflowing.
@@ -1913,7 +1847,7 @@ int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
int64_t pre_min = int_min / 10;
// Process the digits.
- d = *p;
+ char d = *p;
if (tsep != '\0') {
while (1) {
if (d == tsep) {
@@ -1950,7 +1884,7 @@ int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
int dig_pre_max = int_max % 10;
// Process the digits.
- d = *p;
+ char d = *p;
if (tsep != '\0') {
while (1) {
if (d == tsep) {
@@ -2002,11 +1936,6 @@ int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max,
uint64_t uint_max, int *error, char tsep) {
const char *p = p_item;
- uint64_t pre_max = uint_max / 10;
- int dig_pre_max = uint_max % 10;
- uint64_t number = 0;
- int d;
-
// Skip leading spaces.
while (isspace_ascii(*p)) {
++p;
@@ -2032,7 +1961,10 @@ uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max,
// can be processed without overflowing.
//
// Process the digits.
- d = *p;
+ uint64_t number = 0;
+ const uint64_t pre_max = uint_max / 10;
+ const uint64_t dig_pre_max = uint_max % 10;
+ char d = *p;
if (tsep != '\0') {
while (1) {
if (d == tsep) {
| Same pattern as before - remove C89 constructs, add const / static where possible
This has an added tweak to remove unnecessary int return values from destructors | https://api.github.com/repos/pandas-dev/pandas/pulls/56274 | 2023-12-01T01:37:36Z | 2023-12-01T18:36:42Z | 2023-12-01T18:36:42Z | 2023-12-01T23:25:09Z |
DOC: Added Documentation for MultiIndex.codes | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 86693f241ddb1..2eea0c2dff84c 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1079,6 +1079,42 @@ def levshape(self) -> Shape:
@property
def codes(self) -> FrozenList:
+ '''
+ codes in MultiIndex refers to Integer codes for each level of
+ MultiIndex.
+
+ The `codes` attribute represents the integer codes that are used
+ internally to represent each label in the corresponding level of the
+ MultiIndex. These codes facilitate efficient storage and retrieval of
+ hierarchical data.
+
+ Each element in the list corresponds to a level in the MultiIndex,
+ and the arrays within the list contain the integer codes for the
+ labels at that level.
+
+ Returns
+ -------
+ pandas.core.indexes.frozen.FrozenList
+ A frozen list containing integer codes for each level of the MultiIndex.
+
+ Example
+ -------
+ >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
+ >>> df.set_index(['A', 'B'], inplace=True)
+ >>> df.index.codes
+ [array([0, 1, 2]), array([0, 1, 2])]
+
+ >>> levels = [[2, 3, 1], ['Gold', 'Silver', 'Bronze']]
+ >>> codes = [[2, 0, 1], [0, 1, 2]]
+ >>> index = pd.MultiIndex(levels=levels, codes=codes)
+ >>> index
+ MultiIndex([(1, 'Gold'),
+ (2, 'Silver'),
+ (3, 'Bronze')],
+ )
+ >>> index.codes
+ FrozenList([[2, 0, 1], [0, 1, 2]])
+ '''
return self._codes
def _set_codes(
| - [X] closes #55437 (Replace xxxx with the GitHub issue number)
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/56265 | 2023-11-30T20:38:25Z | 2023-12-01T05:26:15Z | null | 2023-12-01T05:26:21Z |
TST/CLN: Remove makeTime methods | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index ead00cd778d7b..676757d8e095f 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -6,7 +6,6 @@
import operator
import os
import re
-import string
from sys import byteorder
from typing import (
TYPE_CHECKING,
@@ -109,7 +108,6 @@
from pandas.core.arrays import ArrowExtensionArray
_N = 30
-_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
@@ -341,10 +339,6 @@ def to_array(obj):
# Others
-def getCols(k) -> str:
- return string.ascii_uppercase[:k]
-
-
def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series:
if nper is None:
nper = _N
@@ -355,16 +349,6 @@ def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series:
)
-def getTimeSeriesData(nper=None, freq: Frequency = "B") -> dict[str, Series]:
- return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
-
-
-# make frame
-def makeTimeDataFrame(nper=None, freq: Frequency = "B") -> DataFrame:
- data = getTimeSeriesData(nper, freq)
- return DataFrame(data)
-
-
def makeCustomIndex(
nentries,
nlevels,
@@ -887,7 +871,6 @@ def shares_memory(left, right) -> bool:
"external_error_raised",
"FLOAT_EA_DTYPES",
"FLOAT_NUMPY_DTYPES",
- "getCols",
"get_cython_table_params",
"get_dtype",
"getitem",
@@ -895,13 +878,11 @@ def shares_memory(left, right) -> bool:
"get_finest_unit",
"get_obj",
"get_op_from_name",
- "getTimeSeriesData",
"iat",
"iloc",
"loc",
"makeCustomDataframe",
"makeCustomIndex",
- "makeTimeDataFrame",
"makeTimeSeries",
"maybe_produces_warning",
"NARROW_NP_DTYPES",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 9ed6f8f43ae03..6401d4b5981e0 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -550,7 +550,11 @@ def multiindex_year_month_day_dataframe_random_data():
DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data
"""
- tdf = tm.makeTimeDataFrame(100)
+ tdf = DataFrame(
+ np.random.default_rng(2).standard_normal((100, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100, freq="B"),
+ )
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use int64 Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index 99ea565e5b60c..e07024b2e2a09 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -7,7 +7,6 @@
NaT,
date_range,
)
-import pandas._testing as tm
@pytest.fixture
@@ -16,27 +15,12 @@ def datetime_frame() -> DataFrame:
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
-
- A B C D
- 2000-01-03 -1.122153 0.468535 0.122226 1.693711
- 2000-01-04 0.189378 0.486100 0.007864 -1.216052
- 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
- 2000-01-06 0.430050 0.894352 0.090719 0.036939
- 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
- 2000-01-10 -0.752633 0.328434 -0.815325 0.699674
- 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
- ... ... ... ... ...
- 2000-02-03 1.642618 -0.579288 0.046005 1.385249
- 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
- 2000-02-07 -2.656149 -0.601387 1.410148 0.444150
- 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
- 2000-02-09 1.377373 0.398619 1.008453 -0.928207
- 2000-02-10 0.473194 -0.636677 0.984058 0.511519
- 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
-
- [30 rows x 4 columns]
"""
- return DataFrame(tm.getTimeSeriesData())
+ return DataFrame(
+ np.random.default_rng(2).standard_normal((100, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100, freq="B"),
+ )
@pytest.fixture
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 135a86cad1395..dfb4a3092789a 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -584,7 +584,7 @@ def test_fancy_getitem_slice_mixed(
tm.assert_frame_equal(float_frame, original)
def test_getitem_setitem_non_ix_labels(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(range(20), index=date_range("2020-01-01", periods=20))
start, end = df.index[[5, 10]]
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 359e9122b0c0b..108816697ef3e 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -6,7 +6,9 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
Series,
+ date_range,
isna,
)
import pandas._testing as tm
@@ -325,8 +327,12 @@ def test_corrwith(self, datetime_frame, dtype):
tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame()
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy()
cols = ["A", "B", "C", "D"]
df1["obj"] = "foo"
diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py
index 23355a5549a88..212e56442ee07 100644
--- a/pandas/tests/frame/methods/test_first_and_last.py
+++ b/pandas/tests/frame/methods/test_first_and_last.py
@@ -1,12 +1,15 @@
"""
Note: includes tests for `last`
"""
+import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
+ Index,
bdate_range,
+ date_range,
)
import pandas._testing as tm
@@ -16,13 +19,21 @@
class TestFirst:
def test_first_subset(self, frame_or_series):
- ts = tm.makeTimeDataFrame(freq="12h")
+ ts = DataFrame(
+ np.random.default_rng(2).standard_normal((100, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100, freq="12h"),
+ )
ts = tm.get_obj(ts, frame_or_series)
with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
result = ts.first("10d")
assert len(result) == 20
- ts = tm.makeTimeDataFrame(freq="D")
+ ts = DataFrame(
+ np.random.default_rng(2).standard_normal((100, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100, freq="D"),
+ )
ts = tm.get_obj(ts, frame_or_series)
with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
result = ts.first("10d")
@@ -64,13 +75,21 @@ def test_first_last_raises(self, frame_or_series):
obj.last("1D")
def test_last_subset(self, frame_or_series):
- ts = tm.makeTimeDataFrame(freq="12h")
+ ts = DataFrame(
+ np.random.default_rng(2).standard_normal((100, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100, freq="12h"),
+ )
ts = tm.get_obj(ts, frame_or_series)
with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
result = ts.last("10d")
assert len(result) == 20
- ts = tm.makeTimeDataFrame(nper=30, freq="D")
+ ts = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=30, freq="D"),
+ )
ts = tm.get_obj(ts, frame_or_series)
with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
result = ts.last("10d")
diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py
index 4c4b04076c8d5..12077952c2e03 100644
--- a/pandas/tests/frame/methods/test_truncate.py
+++ b/pandas/tests/frame/methods/test_truncate.py
@@ -60,7 +60,7 @@ def test_truncate(self, datetime_frame, frame_or_series):
truncated = ts.truncate(before=ts.index[-1] + ts.index.freq)
assert len(truncated) == 0
- msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
+ msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-05-16 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(
before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 7fd795dc84cca..a4825c80ee815 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1523,8 +1523,12 @@ def test_combineFunc(self, float_frame, mixed_float_frame):
[operator.eq, operator.ne, operator.lt, operator.gt, operator.ge, operator.le],
)
def test_comparisons(self, simple_frame, float_frame, func):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame()
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=pd.date_range("2000-01-01", periods=30, freq="B"),
+ )
+ df2 = df1.copy()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 1f08b9d5c35b8..6564e381af0ea 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -10,7 +10,9 @@
from pandas import (
DataFrame,
+ Index,
Series,
+ date_range,
)
import pandas._testing as tm
@@ -328,12 +330,16 @@ def test_squeeze_series_noop(self, ser):
def test_squeeze_frame_noop(self):
# noop
- df = tm.makeTimeDataFrame()
+ df = DataFrame(np.eye(2))
tm.assert_frame_equal(df.squeeze(), df)
def test_squeeze_frame_reindex(self):
# squeezing
- df = tm.makeTimeDataFrame().reindex(columns=["A"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ ).reindex(columns=["A"])
tm.assert_series_equal(df.squeeze(), df["A"])
def test_squeeze_0_len_dim(self):
@@ -345,7 +351,11 @@ def test_squeeze_0_len_dim(self):
def test_squeeze_axis(self):
# axis argument
- df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((1, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=1, freq="B"),
+ ).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
@@ -360,14 +370,22 @@ def test_squeeze_axis(self):
df.squeeze(axis="x")
def test_squeeze_axis_len_3(self):
- df = tm.makeTimeDataFrame(3)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((3, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=3, freq="B"),
+ )
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = Series(range(2), dtype=np.float64)
tm.assert_series_equal(np.squeeze(s), s)
- df = tm.makeTimeDataFrame().reindex(columns=["A"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ ).reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
@pytest.mark.parametrize(
@@ -382,11 +400,19 @@ def test_transpose_series(self, ser):
tm.assert_series_equal(ser.transpose(), ser)
def test_transpose_frame(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
tm.assert_frame_equal(df.transpose().transpose(), df)
def test_numpy_transpose(self, frame_or_series):
- obj = tm.makeTimeDataFrame()
+ obj = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
obj = tm.get_obj(obj, frame_or_series)
if frame_or_series is Series:
@@ -419,7 +445,11 @@ def test_take_series(self, ser):
def test_take_frame(self):
indices = [1, 5, -2, 6, 3, -1]
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
out = df.take(indices)
expected = DataFrame(
data=df.values.take(indices, axis=0),
@@ -431,7 +461,7 @@ def test_take_frame(self):
def test_take_invalid_kwargs(self, frame_or_series):
indices = [-3, 2, 0, 1]
- obj = tm.makeTimeDataFrame()
+ obj = DataFrame(range(5))
obj = tm.get_obj(obj, frame_or_series)
msg = r"take\(\) got an unexpected keyword argument 'foo'"
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 3ba1510cc6b1d..c3bcd30796e63 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -161,7 +161,11 @@ def test_agg_apply_corner(ts, tsframe):
def test_agg_grouping_is_list_tuple(ts):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=pd.date_range("2000-01-01", periods=30, freq="B"),
+ )
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouping_vector
diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py
index b8fb3b7fff676..6d4a874f9d3ec 100644
--- a/pandas/tests/groupby/conftest.py
+++ b/pandas/tests/groupby/conftest.py
@@ -1,7 +1,11 @@
import numpy as np
import pytest
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ Index,
+ date_range,
+)
import pandas._testing as tm
from pandas.core.groupby.base import (
reduction_kernels,
@@ -48,7 +52,11 @@ def ts():
@pytest.fixture
def tsframe():
- return DataFrame(tm.getTimeSeriesData())
+ return DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=30, freq="B"),
+ )
@pytest.fixture
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 5b17484de9c93..254a12d9bdebb 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -319,7 +319,11 @@ def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
def test_len():
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
@@ -327,6 +331,8 @@ def test_len():
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
+
+def test_len_nan_group():
# issue 11016
df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]})
assert len(df.groupby("a")) == 0
@@ -940,7 +946,11 @@ def test_groupby_as_index_corner(df, ts):
def test_groupby_multiple_key():
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
@@ -1655,7 +1665,11 @@ def test_dont_clobber_name_column():
def test_skip_group_keys():
- tsf = tm.makeTimeDataFrame()
+ tsf = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index a6f160d92fb66..35699fe9647d7 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -10,6 +10,7 @@
from pandas import (
Categorical,
DataFrame,
+ Index,
MultiIndex,
Series,
Timestamp,
@@ -67,7 +68,11 @@ def demean(arr):
tm.assert_frame_equal(result, expected)
# GH 8430
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((50, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=50, freq="B"),
+ )
g = df.groupby(pd.Grouper(freq="ME"))
g.transform(lambda x: x - 1)
@@ -115,7 +120,7 @@ def test_transform_fast2():
)
result = df.groupby("grouping").transform("first")
- dates = pd.Index(
+ dates = Index(
[
Timestamp("2014-1-1"),
Timestamp("2014-1-2"),
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index eae7e46c7ec35..d270741a0e0bc 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -13,6 +13,7 @@
import pandas as pd
from pandas import (
Categorical,
+ DataFrame,
Index,
MultiIndex,
date_range,
@@ -37,7 +38,11 @@ def test_slice_locs_partial(self, idx):
assert result == (2, 4)
def test_slice_locs(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((50, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=50, freq="B"),
+ )
stacked = df.stack(future_stack=True)
idx = stacked.index
@@ -57,7 +62,11 @@ def test_slice_locs(self):
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
stacked = df.stack(future_stack=True)
idx = stacked.index
with pytest.raises(TypeError, match="^Level type mismatch"):
@@ -861,7 +870,7 @@ def test_timestamp_multiindex_indexer():
[3],
]
)
- df = pd.DataFrame({"foo": np.arange(len(idx))}, idx)
+ df = DataFrame({"foo": np.arange(len(idx))}, idx)
result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]
qidx = MultiIndex.from_product(
[
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 2f9018112c03b..ca551024b4c1f 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -536,7 +536,11 @@ def test_series_partial_set_with_name(self):
@pytest.mark.parametrize("key", [100, 100.0])
def test_setitem_with_expansion_numeric_into_datetimeindex(self, key):
# GH#4940 inserting non-strings
- orig = tm.makeTimeDataFrame()
+ orig = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df = orig.copy()
df.loc[key, :] = df.iloc[0]
@@ -550,7 +554,11 @@ def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
- orig = tm.makeTimeDataFrame()
+ orig = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
# allow object conversion here
df = orig.copy()
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 4dfae753edf72..74286a3ddd8ed 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -20,6 +20,7 @@
DataFrame,
Index,
MultiIndex,
+ date_range,
option_context,
)
import pandas._testing as tm
@@ -271,7 +272,7 @@ def test_excel_multindex_roundtrip(
def test_read_excel_parse_dates(self, ext):
# see gh-11544, gh-12051
df = DataFrame(
- {"col": [1, 2, 3], "date_strings": pd.date_range("2012-01-01", periods=3)}
+ {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)}
)
df2 = df.copy()
df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
@@ -460,7 +461,11 @@ def test_mixed(self, frame, path):
tm.assert_frame_equal(mixed_frame, recons)
def test_ts_frame(self, path):
- df = tm.makeTimeDataFrame()[:5]
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
# freq doesn't round-trip
index = pd.DatetimeIndex(np.asarray(df.index), freq=None)
@@ -533,7 +538,11 @@ def test_inf_roundtrip(self, path):
def test_sheets(self, frame, path):
# freq doesn't round-trip
- tsframe = tm.makeTimeDataFrame()[:5]
+ tsframe = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
tsframe.index = index
@@ -653,7 +662,11 @@ def test_excel_roundtrip_datetime(self, merge_cells, path):
# datetime.date, not sure what to test here exactly
# freq does not round-trip
- tsframe = tm.makeTimeDataFrame()[:5]
+ tsframe = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
tsframe.index = index
@@ -772,7 +785,11 @@ def test_to_excel_timedelta(self, path):
def test_to_excel_periodindex(self, path):
# xp has a PeriodIndex
- df = tm.makeTimeDataFrame()[:5]
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
xp = df.resample("ME").mean().to_period("M")
xp.to_excel(path, sheet_name="sht1")
@@ -837,7 +854,11 @@ def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
def test_to_excel_multiindex_dates(self, merge_cells, path):
# try multiindex with dates
- tsframe = tm.makeTimeDataFrame()[:5]
+ tsframe = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
new_index = [tsframe.index, np.arange(len(tsframe.index), dtype=np.int64)]
tsframe.index = MultiIndex.from_arrays(new_index)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 79be90cd00469..428c73c282426 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -23,8 +23,10 @@
NA,
DataFrame,
DatetimeIndex,
+ Index,
Series,
Timestamp,
+ date_range,
read_json,
)
import pandas._testing as tm
@@ -115,7 +117,11 @@ def datetime_series(self):
def datetime_frame(self):
# Same as usual datetime_frame, but with index freq set to None,
# since that doesn't round-trip, see GH#33711
- df = DataFrame(tm.getTimeSeriesData())
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=30, freq="B"),
+ )
df.index = df.index._with_freq(None)
return df
@@ -266,7 +272,7 @@ def test_roundtrip_empty(self, orient, convert_axes):
data = StringIO(empty_frame.to_json(orient=orient))
result = read_json(data, orient=orient, convert_axes=convert_axes)
if orient == "split":
- idx = pd.Index([], dtype=(float if convert_axes else object))
+ idx = Index([], dtype=(float if convert_axes else object))
expected = DataFrame(index=idx, columns=idx)
elif orient in ["index", "columns"]:
expected = DataFrame()
@@ -294,7 +300,7 @@ def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):
@pytest.mark.parametrize("convert_axes", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes):
- index = pd.Index(["a", "b", "c", "d", "e"])
+ index = Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
@@ -495,7 +501,7 @@ def test_frame_mixedtype_orient(self): # GH10289
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
- dti = pd.date_range("2000-01-03", "2000-01-07")
+ dti = date_range("2000-01-03", "2000-01-07")
# freq doesn't roundtrip
dti = DatetimeIndex(np.asarray(dti), freq=None)
df = DataFrame(
@@ -525,7 +531,7 @@ def test_v12_compat(self, datapath):
tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False)
def test_blocks_compat_GH9037(self, using_infer_string):
- index = pd.date_range("20000101", periods=10, freq="h")
+ index = date_range("20000101", periods=10, freq="h")
# freq doesn't round-trip
index = DatetimeIndex(list(index), freq=None)
@@ -1034,7 +1040,7 @@ def test_doc_example(self):
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
- dfj2.index = pd.date_range("20130101", periods=5)
+ dfj2.index = date_range("20130101", periods=5)
json = StringIO(dfj2.to_json())
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
@@ -1078,7 +1084,7 @@ def test_timedelta(self):
result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
tm.assert_series_equal(result, ser)
- ser = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
+ ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1]))
assert ser.dtype == "timedelta64[ns]"
result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
tm.assert_series_equal(result, ser)
@@ -1094,7 +1100,7 @@ def test_timedelta2(self):
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
- "c": pd.date_range(start="20130101", periods=2),
+ "c": date_range(start="20130101", periods=2),
}
)
data = StringIO(frame.to_json(date_unit="ns"))
@@ -1209,10 +1215,10 @@ def test_categorical(self):
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
- tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
+ tz_range = date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
- df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
+ df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
@@ -1265,9 +1271,9 @@ def test_tz_is_naive(self):
@pytest.mark.parametrize(
"tz_range",
[
- pd.date_range("2013-01-01 05:00:00Z", periods=2),
- pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
- pd.date_range("2013-01-01 00:00:00-0500", periods=2),
+ date_range("2013-01-01 05:00:00Z", periods=2),
+ date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
+ date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
@@ -1290,7 +1296,7 @@ def test_tz_range_is_utc(self, tz_range):
assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)
def test_tz_range_is_naive(self):
- dti = pd.date_range("2013-01-01 05:00:00", periods=2)
+ dti = date_range("2013-01-01 05:00:00", periods=2)
exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'
dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'
@@ -1926,7 +1932,7 @@ def test_to_json_multiindex_escape(self):
# GH 15273
df = DataFrame(
True,
- index=pd.date_range("2017-01-20", "2017-01-23"),
+ index=date_range("2017-01-20", "2017-01-23"),
columns=["foo", "bar"],
).stack(future_stack=True)
result = df.to_json()
@@ -2128,8 +2134,8 @@ def test_json_roundtrip_string_inference(orient):
expected = DataFrame(
[["a", "b"], ["c", "d"]],
dtype="string[pyarrow_numpy]",
- index=pd.Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"),
- columns=pd.Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"),
+ index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"),
+ columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"),
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index 9eb9ffa53dd22..6cb4a4ad47440 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -33,7 +33,11 @@ def test_append(setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
@@ -279,7 +283,11 @@ def test_append_all_nans(setup_path):
def test_append_frame_column_oriented(setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.index = df.index._with_freq(None) # freq doesn't round-trip
_maybe_remove(store, "df1")
@@ -427,7 +435,11 @@ def check_col(key, name, size):
# with nans
_maybe_remove(store, "df")
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["string"] = "foo"
df.loc[df.index[1:4], "string"] = np.nan
df["string2"] = "bar"
@@ -487,7 +499,11 @@ def test_append_with_empty_string(setup_path):
def test_append_with_data_columns(setup_path):
with ensure_clean_store(setup_path) as store:
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
@@ -854,8 +870,12 @@ def test_append_with_timedelta(setup_path):
def test_append_to_multiple(setup_path):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
@@ -887,8 +907,16 @@ def test_append_to_multiple(setup_path):
def test_append_to_multiple_dropna(setup_path):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ ).rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
@@ -904,8 +932,12 @@ def test_append_to_multiple_dropna(setup_path):
def test_append_to_multiple_dropna_false(setup_path):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
diff --git a/pandas/tests/io/pytables/test_errors.py b/pandas/tests/io/pytables/test_errors.py
index d956e4f5775eb..2021101098892 100644
--- a/pandas/tests/io/pytables/test_errors.py
+++ b/pandas/tests/io/pytables/test_errors.py
@@ -98,7 +98,11 @@ def test_unimplemented_dtypes_table_columns(setup_path):
def test_invalid_terms(tmp_path, setup_path):
with ensure_clean_store(setup_path) as store:
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index 2920f0b07b31e..40397c49f12d2 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -20,6 +20,7 @@
Index,
Series,
_testing as tm,
+ date_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
@@ -36,7 +37,11 @@
@pytest.mark.parametrize("mode", ["r", "r+", "a", "w"])
def test_mode(setup_path, tmp_path, mode):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
msg = r"[\S]* does not exist"
path = tmp_path / setup_path
@@ -85,7 +90,11 @@ def test_mode(setup_path, tmp_path, mode):
def test_default_mode(tmp_path, setup_path):
# read_hdf uses default mode
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
path = tmp_path / setup_path
df.to_hdf(path, key="df", mode="w")
result = read_hdf(path, "df")
diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py
index 8a6e3c9006439..df47bd78c86b8 100644
--- a/pandas/tests/io/pytables/test_put.py
+++ b/pandas/tests/io/pytables/test_put.py
@@ -97,7 +97,11 @@ def test_api_default_format(tmp_path, setup_path):
def test_put(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
@@ -153,7 +157,11 @@ def test_put_string_index(setup_path):
def test_put_compression(setup_path):
with ensure_clean_store(setup_path) as store:
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
@@ -166,7 +174,11 @@ def test_put_compression(setup_path):
@td.skip_if_windows
def test_put_compression_blosc(setup_path):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
@@ -179,7 +191,11 @@ def test_put_compression_blosc(setup_path):
def test_put_mixed_type(setup_path):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
diff --git a/pandas/tests/io/pytables/test_read.py b/pandas/tests/io/pytables/test_read.py
index 2030b1eca3203..e4a3ea1fc9db8 100644
--- a/pandas/tests/io/pytables/test_read.py
+++ b/pandas/tests/io/pytables/test_read.py
@@ -15,6 +15,7 @@
Index,
Series,
_testing as tm,
+ date_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
@@ -72,7 +73,11 @@ def test_read_missing_key_opened_store(tmp_path, setup_path):
def test_read_column(setup_path):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 693f10172a99e..2c61da3809010 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -15,6 +15,7 @@
Series,
_testing as tm,
bdate_range,
+ date_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
@@ -372,7 +373,11 @@ def test_frame(compression, setup_path):
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
- tdf = tm.makeTimeDataFrame()
+ tdf = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
_check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py
index 3eaa1e86dbf6d..0e303d1c890c5 100644
--- a/pandas/tests/io/pytables/test_select.py
+++ b/pandas/tests/io/pytables/test_select.py
@@ -130,7 +130,11 @@ def test_select_with_dups(setup_path):
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
# select with columns=
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
@@ -331,7 +335,11 @@ def test_select_with_many_inputs(setup_path):
def test_select_iterator(tmp_path, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
- df = tm.makeTimeDataFrame(500)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
_maybe_remove(store, "df")
store.append("df", df)
@@ -341,33 +349,41 @@ def test_select_iterator(tmp_path, setup_path):
result = concat(results)
tm.assert_frame_equal(expected, result)
- results = list(store.select("df", chunksize=100))
+ results = list(store.select("df", chunksize=2))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
- results = list(store.select("df", chunksize=150))
+ results = list(store.select("df", chunksize=2))
result = concat(results)
tm.assert_frame_equal(result, expected)
path = tmp_path / setup_path
- df = tm.makeTimeDataFrame(500)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.to_hdf(path, key="df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
- read_hdf(path, "df_non_table", chunksize=100)
+ read_hdf(path, "df_non_table", chunksize=2)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
path = tmp_path / setup_path
- df = tm.makeTimeDataFrame(500)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.to_hdf(path, key="df", format="table")
- results = list(read_hdf(path, "df", chunksize=100))
+ results = list(read_hdf(path, "df", chunksize=2))
result = concat(results)
assert len(results) == 5
@@ -377,9 +393,13 @@ def test_select_iterator(tmp_path, setup_path):
# multiple
with ensure_clean_store(setup_path) as store:
- df1 = tm.makeTimeDataFrame(500)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
store.append("df1", df1, data_columns=True)
- df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
+ df2 = df1.copy().rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
@@ -388,7 +408,7 @@ def test_select_iterator(tmp_path, setup_path):
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
- store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
+ store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=2)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
@@ -401,7 +421,11 @@ def test_select_iterator_complete_8014(setup_path):
# no iterator
with ensure_clean_store(setup_path) as store:
- expected = tm.makeTimeDataFrame(100064, "s")
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
_maybe_remove(store, "df")
store.append("df", expected)
@@ -432,7 +456,11 @@ def test_select_iterator_complete_8014(setup_path):
# with iterator, full range
with ensure_clean_store(setup_path) as store:
- expected = tm.makeTimeDataFrame(100064, "s")
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
_maybe_remove(store, "df")
store.append("df", expected)
@@ -470,7 +498,11 @@ def test_select_iterator_non_complete_8014(setup_path):
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
- expected = tm.makeTimeDataFrame(100064, "s")
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
_maybe_remove(store, "df")
store.append("df", expected)
@@ -500,7 +532,11 @@ def test_select_iterator_non_complete_8014(setup_path):
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
- expected = tm.makeTimeDataFrame(100064, "s")
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
_maybe_remove(store, "df")
store.append("df", expected)
@@ -520,7 +556,11 @@ def test_select_iterator_many_empty_frames(setup_path):
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
- expected = tm.makeTimeDataFrame(100000, "s")
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
_maybe_remove(store, "df")
store.append("df", expected)
@@ -568,7 +608,11 @@ def test_select_iterator_many_empty_frames(setup_path):
def test_frame_select(setup_path):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
@@ -589,7 +633,11 @@ def test_frame_select(setup_path):
tm.assert_frame_equal(result, expected)
# invalid terms
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
store.append("df_time", df)
msg = "day is out of range for month: 0"
with pytest.raises(ValueError, match=msg):
@@ -604,7 +652,11 @@ def test_frame_select(setup_path):
def test_frame_select_complex(setup_path):
# select via complex criteria
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
@@ -717,7 +769,11 @@ def test_frame_select_complex2(tmp_path):
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
@@ -735,7 +791,11 @@ def test_invalid_filtering(setup_path):
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
# test string ==/!=
df["x"] = "none"
@@ -775,8 +835,12 @@ def test_string_select(setup_path):
def test_select_as_multiple(setup_path):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
@@ -836,7 +900,8 @@ def test_select_as_multiple(setup_path):
tm.assert_frame_equal(result, expected)
# test exception for diff rows
- store.append("df3", tm.makeTimeDataFrame(nper=50))
+ df3 = df1.copy().head(2)
+ store.append("df3", df3)
msg = "all tables must have exactly the same nrows!"
with pytest.raises(ValueError, match=msg):
store.select_as_multiple(
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 98257f1765d53..057f1b1fd19c3 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -201,7 +201,11 @@ def test_versioning(setup_path):
columns=Index(list("ABCD"), dtype=object),
index=Index([f"i-{i}" for i in range(30)], dtype=object),
)
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
@@ -295,7 +299,11 @@ def test_getattr(setup_path):
result = getattr(store, "a")
tm.assert_series_equal(result, s)
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
@@ -395,7 +403,11 @@ def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
@@ -426,7 +438,11 @@ def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
@@ -640,7 +656,11 @@ def test_store_series_name(setup_path):
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
- store["a"] = tm.makeTimeDataFrame()
+ store["a"] = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
ts = tm.makeTimeSeries()
store["a"] = ts
@@ -648,7 +668,11 @@ def test_overwrite_node(setup_path):
def test_coordinates(setup_path):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
@@ -679,8 +703,12 @@ def test_coordinates(setup_path):
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index d7c69ff17749c..e20c49c072515 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -4119,8 +4119,12 @@ def tquery(query, con=None):
def test_xsqlite_basic(sqlite_buildin):
- frame = tm.makeTimeDataFrame()
- assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 30
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 10
result = sql.read_sql("select * from test_table", sqlite_buildin)
# HACK! Change this once indexes are handled properly.
@@ -4133,7 +4137,7 @@ def test_xsqlite_basic(sqlite_buildin):
frame2 = frame.copy()
new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10
frame2["Idx"] = new_idx.copy()
- assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 30
+ assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 10
result = sql.read_sql("select * from test_table2", sqlite_buildin, index_col="Idx")
expected = frame.copy()
expected.index = new_idx
@@ -4142,7 +4146,11 @@ def test_xsqlite_basic(sqlite_buildin):
def test_xsqlite_write_row_by_row(sqlite_buildin):
- frame = tm.makeTimeDataFrame()
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, "test")
cur = sqlite_buildin.cursor()
@@ -4161,7 +4169,11 @@ def test_xsqlite_write_row_by_row(sqlite_buildin):
def test_xsqlite_execute(sqlite_buildin):
- frame = tm.makeTimeDataFrame()
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
create_sql = sql.get_schema(frame, "test")
cur = sqlite_buildin.cursor()
cur.execute(create_sql)
@@ -4178,7 +4190,11 @@ def test_xsqlite_execute(sqlite_buildin):
def test_xsqlite_schema(sqlite_buildin):
- frame = tm.makeTimeDataFrame()
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for line in lines:
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 2a864abc5ea4a..45dc612148f40 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -19,6 +19,7 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
MultiIndex,
PeriodIndex,
Series,
@@ -53,19 +54,31 @@
class TestDataFramePlots:
@pytest.mark.slow
def test_plot(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
_check_plot_works(df.plot, grid=False)
@pytest.mark.slow
def test_plot_subplots(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
# _check_plot_works adds an ax so use default_axes=True to avoid warning
axes = _check_plot_works(df.plot, default_axes=True, subplots=True)
_check_axes_shape(axes, axes_num=4, layout=(4, 1))
@pytest.mark.slow
def test_plot_subplots_negative_layout(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
axes = _check_plot_works(
df.plot,
default_axes=True,
@@ -76,7 +89,11 @@ def test_plot_subplots_negative_layout(self):
@pytest.mark.slow
def test_plot_subplots_use_index(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
axes = _check_plot_works(
df.plot,
default_axes=True,
@@ -286,7 +303,11 @@ def test_donot_overwrite_index_name(self):
def test_plot_xy(self):
# columns.inferred_type == 'string'
- df = tm.makeTimeDataFrame(5)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
_check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
_check_data(df.plot(x=0), df.set_index("A").plot())
_check_data(df.plot(y=0), df.B.plot())
@@ -295,7 +316,11 @@ def test_plot_xy(self):
_check_data(df.plot(y="B"), df.B.plot())
def test_plot_xy_int_cols(self):
- df = tm.makeTimeDataFrame(5)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
# columns.inferred_type == 'integer'
df.columns = np.arange(1, len(df.columns) + 1)
_check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
@@ -303,7 +328,11 @@ def test_plot_xy_int_cols(self):
_check_data(df.plot(y=1), df[1].plot())
def test_plot_xy_figsize_and_title(self):
- df = tm.makeTimeDataFrame(5)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="B"),
+ )
# figsize and title
ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
_check_text_labels(ax.title, "Test")
@@ -345,14 +374,22 @@ def test_invalid_logscale(self, input_param):
df.plot.pie(subplots=True, **{input_param: True})
def test_xcompat(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_params(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
plotting.plot_params["xaxis.compat"] = True
ax = df.plot()
lines = ax.get_lines()
@@ -360,7 +397,11 @@ def test_xcompat_plot_params(self):
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_params_x_compat(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
plotting.plot_params["x_compat"] = False
ax = df.plot()
@@ -371,7 +412,11 @@ def test_xcompat_plot_params_x_compat(self):
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_xcompat_plot_params_context_manager(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
# useful if you're plotting a bunch together
with plotting.plot_params.use("x_compat", True):
ax = df.plot()
@@ -380,7 +425,11 @@ def test_xcompat_plot_params_context_manager(self):
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_period(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
@@ -405,7 +454,7 @@ def test_period_compat(self):
def test_unsorted_index(self, index_dtype):
df = DataFrame(
{"y": np.arange(100)},
- index=pd.Index(np.arange(99, -1, -1), dtype=index_dtype),
+ index=Index(np.arange(99, -1, -1), dtype=index_dtype),
dtype=np.int64,
)
ax = df.plot()
@@ -723,7 +772,7 @@ def test_bar_nan_stacked(self):
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
- @pytest.mark.parametrize("idx", [pd.Index, pd.CategoricalIndex])
+ @pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex])
def test_bar_categorical(self, idx):
# GH 13019
df = DataFrame(
@@ -1391,7 +1440,7 @@ def test_unordered_ts(self):
# the ticks are sorted
xticks = ax.xaxis.get_ticklabels()
xlocs = [x.get_position()[0] for x in xticks]
- assert pd.Index(xlocs).is_monotonic_increasing
+ assert Index(xlocs).is_monotonic_increasing
xlabels = [x.get_text() for x in xticks]
assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing
@@ -2062,9 +2111,17 @@ def test_memory_leak(self, kind):
)
args = {"x": "A", "y": "B"}
elif kind == "area":
- df = tm.makeTimeDataFrame().abs()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ ).abs()
else:
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
@@ -2513,7 +2570,11 @@ def test_secondary_y(self, secondary_y):
def test_plot_no_warning(self):
# GH 55138
# TODO(3.0): this can be removed once Period[B] deprecation is enforced
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
with tm.assert_produces_warning(False):
_ = df.plot()
_ = df.T.plot()
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 3195b7637ee3c..401a7610b25d8 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1247,7 +1247,11 @@ def test_secondary_legend(self):
ax = fig.add_subplot(211)
# ts
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.plot(secondary_y=["A", "B"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
@@ -1265,7 +1269,11 @@ def test_secondary_legend(self):
mpl.pyplot.close(fig)
def test_secondary_legend_right(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
fig = mpl.pyplot.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax)
@@ -1278,7 +1286,11 @@ def test_secondary_legend_right(self):
mpl.pyplot.close(fig)
def test_secondary_legend_bar(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
fig, ax = mpl.pyplot.subplots()
df.plot(kind="bar", secondary_y=["A"], ax=ax)
leg = ax.get_legend()
@@ -1287,7 +1299,11 @@ def test_secondary_legend_bar(self):
mpl.pyplot.close(fig)
def test_secondary_legend_bar_right(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
fig, ax = mpl.pyplot.subplots()
df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax)
leg = ax.get_legend()
@@ -1296,10 +1312,18 @@ def test_secondary_legend_bar_right(self):
mpl.pyplot.close(fig)
def test_secondary_legend_multi_col(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
fig = mpl.pyplot.figure()
ax = fig.add_subplot(211)
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
ax = df.plot(secondary_y=["C", "D"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 865c02798c648..8a725c6e51e3f 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
Series,
Timedelta,
Timestamp,
@@ -433,7 +434,11 @@ def test_resample_upsampling_picked_but_not_correct(unit):
@pytest.mark.parametrize("f", ["sum", "mean", "prod", "min", "max", "var"])
def test_resample_frame_basic_cy_funcs(f, unit):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((50, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=50, freq="B"),
+ )
df.index = df.index.as_unit(unit)
b = Grouper(freq="ME")
@@ -445,7 +450,11 @@ def test_resample_frame_basic_cy_funcs(f, unit):
@pytest.mark.parametrize("freq", ["YE", "ME"])
def test_resample_frame_basic_M_A(freq, unit):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((50, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=50, freq="B"),
+ )
df.index = df.index.as_unit(unit)
result = df.resample(freq).mean()
tm.assert_series_equal(result["A"], df["A"].resample(freq).mean())
@@ -453,7 +462,11 @@ def test_resample_frame_basic_M_A(freq, unit):
@pytest.mark.parametrize("freq", ["W-WED", "ME"])
def test_resample_frame_basic_kind(freq, unit):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
df.index = df.index.as_unit(unit)
msg = "The 'kind' keyword in DataFrame.resample is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
@@ -1465,7 +1478,11 @@ def test_resample_nunique(unit):
def test_resample_nunique_preserves_column_level_names(unit):
# see gh-23222
- df = tm.makeTimeDataFrame(freq="1D").abs()
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=5, freq="D"),
+ ).abs()
df.index = df.index.as_unit(unit)
df.columns = pd.MultiIndex.from_arrays(
[df.columns.tolist()] * 2, names=["lev0", "lev1"]
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index f07c223bf0de2..3408e6e4731bd 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -168,7 +168,7 @@ def test_join_on_fails_with_different_right_index(self):
"a": np.random.default_rng(2).choice(["m", "f"], size=10),
"b": np.random.default_rng(2).standard_normal(10),
},
- index=tm.makeCustomIndex(10, 2),
+ index=MultiIndex.from_product([range(5), ["A", "B"]]),
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
@@ -180,7 +180,7 @@ def test_join_on_fails_with_different_left_index(self):
"a": np.random.default_rng(2).choice(["m", "f"], size=3),
"b": np.random.default_rng(2).standard_normal(3),
},
- index=tm.makeCustomIndex(3, 2),
+ index=MultiIndex.from_arrays([range(3), list("abc")]),
)
df2 = DataFrame(
{
@@ -204,7 +204,7 @@ def test_join_on_fails_with_different_column_counts(self):
"a": np.random.default_rng(2).choice(["m", "f"], size=10),
"b": np.random.default_rng(2).standard_normal(10),
},
- index=tm.makeCustomIndex(10, 2),
+ index=MultiIndex.from_product([range(5), ["A", "B"]]),
)
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index b10436889d829..ff9f927597956 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -6,6 +6,8 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
+ date_range,
lreshape,
melt,
wide_to_long,
@@ -15,7 +17,11 @@
@pytest.fixture
def df():
- res = tm.makeTimeDataFrame()[:10]
+ res = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
res["id1"] = (res["A"] > 0).astype(np.int64)
res["id2"] = (res["B"] > 0).astype(np.int64)
return res
@@ -281,7 +287,7 @@ def test_multiindex(self, df1):
@pytest.mark.parametrize(
"col",
[
- pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
+ pd.Series(date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
@@ -396,11 +402,11 @@ def test_ignore_multiindex(self):
def test_ignore_index_name_and_type(self):
# GH 17440
- index = pd.Index(["foo", "bar"], dtype="category", name="baz")
+ index = Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
- expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
+ expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
@@ -1203,7 +1209,7 @@ def test_missing_stubname(self, dtype):
j="num",
sep="-",
)
- index = pd.Index(
+ index = Index(
[("1", 1), ("2", 1), ("1", 2), ("2", 2)],
name=("id", "num"),
)
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index b294e2fcce9d8..3c70e839c8e20 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -4,8 +4,10 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
MultiIndex,
Series,
+ date_range,
)
import pandas._testing as tm
@@ -92,7 +94,7 @@ def test_unstack_tuplename_in_multiindex():
expected = DataFrame(
[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
columns=MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]),
- index=pd.Index([1, 2, 3], name=("B", "b")),
+ index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@@ -109,7 +111,7 @@ def test_unstack_tuplename_in_multiindex():
(
(("A", "a"), "B"),
[[1, 1, 1, 1], [1, 1, 1, 1]],
- pd.Index([3, 4], name="C"),
+ Index([3, 4], name="C"),
MultiIndex.from_tuples(
[("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=[("A", "a"), "B"]
),
@@ -133,9 +135,12 @@ def test_unstack_mixed_type_name_in_multiindex(
def test_unstack_multi_index_categorical_values():
- mi = (
- tm.makeTimeDataFrame().stack(future_stack=True).index.rename(["major", "minor"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
)
+ mi = df.stack(future_stack=True).index.rename(["major", "minor"])
ser = Series(["foo"] * len(mi), index=mi, name="category", dtype="category")
result = ser.unstack()
@@ -144,7 +149,7 @@ def test_unstack_multi_index_categorical_values():
c = pd.Categorical(["foo"] * len(dti))
expected = DataFrame(
{"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()},
- columns=pd.Index(list("ABCD"), name="minor"),
+ columns=Index(list("ABCD"), name="minor"),
index=dti.rename("major"),
)
tm.assert_frame_equal(result, expected)
@@ -158,7 +163,7 @@ def test_unstack_mixed_level_names():
result = ser.unstack("x")
expected = DataFrame(
[[1], [2]],
- columns=pd.Index(["a"], name="x"),
+ columns=Index(["a"], name="x"),
index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]),
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 773d7e174feac..502096d41dde2 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -679,7 +679,7 @@ def test_constructor_broadcast_list(self):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
- df = tm.makeTimeDataFrame()
+ df = DataFrame(range(5), index=date_range("2020-01-01", periods=5))
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 4fa256a6b8630..1e7fdd920e365 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -146,8 +146,8 @@ def test_multiindex_objects():
"D": pd.date_range("20130101", periods=5),
}
),
- tm.makeTimeDataFrame(),
- tm.makeTimeSeries(),
+ DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),
+ Series(range(5), index=pd.date_range("2020-01-01", periods=5)),
Series(period_range("2020-01-01", periods=10, freq="D")),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
@@ -179,8 +179,8 @@ def test_hash_pandas_object(obj, index):
"D": pd.date_range("20130101", periods=5),
}
),
- tm.makeTimeDataFrame(),
- tm.makeTimeSeries(),
+ DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),
+ Series(range(5), index=pd.date_range("2020-01-01", periods=5)),
Series(period_range("2020-01-01", periods=10, freq="D")),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56264 | 2023-11-30T19:58:56Z | 2023-12-01T18:37:05Z | 2023-12-01T18:37:05Z | 2024-01-30T21:09:38Z |
Backport PR #56179 on branch 2.1.x (BUG: to_numeric casting to ea for new string dtype) | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 684b68baa123c..3a72c0864d29c 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -23,6 +23,7 @@ Bug fixes
~~~~~~~~~
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55753`)
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
+- Fixed bug in :func:`to_numeric` converting to extension dtype for ``string[pyarrow_numpy]`` dtype (:issue:`56179`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index a50dbeb110bff..f1b14cdc58b13 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -224,7 +224,8 @@ def to_numeric(
set(),
coerce_numeric=coerce_numeric,
convert_to_masked_nullable=dtype_backend is not lib.no_default
- or isinstance(values_dtype, StringDtype),
+ or isinstance(values_dtype, StringDtype)
+ and not values_dtype.storage == "pyarrow_numpy",
)
except (ValueError, TypeError):
if errors == "raise":
@@ -239,6 +240,7 @@ def to_numeric(
dtype_backend is not lib.no_default
and new_mask is None
or isinstance(values_dtype, StringDtype)
+ and not values_dtype.storage == "pyarrow_numpy"
):
new_mask = np.zeros(values.shape, dtype=np.bool_)
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index 1d969e648b752..7f37e6003f313 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -4,12 +4,15 @@
from numpy import iinfo
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import (
ArrowDtype,
DataFrame,
Index,
Series,
+ option_context,
to_numeric,
)
import pandas._testing as tm
@@ -67,10 +70,14 @@ def test_empty(input_kwargs, result_kwargs):
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
@pytest.mark.parametrize("last_val", ["7", 7])
-def test_series(last_val):
- ser = Series(["1", "-3.14", last_val])
- result = to_numeric(ser)
+def test_series(last_val, infer_string):
+ with option_context("future.infer_string", infer_string):
+ ser = Series(["1", "-3.14", last_val])
+ result = to_numeric(ser)
expected = Series([1, -3.14, 7])
tm.assert_series_equal(result, expected)
| Backport PR #56179: BUG: to_numeric casting to ea for new string dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/56263 | 2023-11-30T17:47:57Z | 2023-11-30T19:59:33Z | 2023-11-30T19:59:33Z | 2023-11-30T19:59:33Z |
BUG: support non-nano times in ewm | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index c73adc25cb1dd..7b09d12697055 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -561,12 +561,14 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.idxmin`, :meth:`.DataFrameGroupBy.idxmax`, :meth:`.SeriesGroupBy.idxmin`, and :meth:`.SeriesGroupBy.idxmax` would not retain :class:`.Categorical` dtype when the index was a :class:`.CategoricalIndex` that contained NA values (:issue:`54234`)
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` when ``observed=False`` and ``f="idxmin"`` or ``f="idxmax"`` would incorrectly raise on unobserved categories (:issue:`54234`)
- Bug in :meth:`DataFrame.asfreq` and :meth:`Series.asfreq` with a :class:`DatetimeIndex` with non-nanosecond resolution incorrectly converting to nanosecond resolution (:issue:`55958`)
+- Bug in :meth:`DataFrame.ewm` when passed ``times`` with non-nanosecond ``datetime64`` or :class:`DatetimeTZDtype` dtype (:issue:`56262`)
- Bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
- Bug in :meth:`DataFrameGroupBy.value_counts` and :meth:`SeriesGroupBy.value_count` could result in incorrect sorting if the columns of the DataFrame or name of the Series are integers (:issue:`55951`)
- Bug in :meth:`DataFrameGroupBy.value_counts` and :meth:`SeriesGroupBy.value_count` would not respect ``sort=False`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` (:issue:`55951`)
- Bug in :meth:`DataFrameGroupBy.value_counts` and :meth:`SeriesGroupBy.value_count` would sort by proportions rather than frequencies when ``sort=True`` and ``normalize=True`` (:issue:`55951`)
+-
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index db659713c6f16..9ebf32d3e536e 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -12,13 +12,15 @@
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
- is_datetime64_ns_dtype,
+ is_datetime64_dtype,
is_numeric_dtype,
)
+from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import common
+from pandas.core.arrays.datetimelike import dtype_to_unit
from pandas.core.indexers.objects import (
BaseIndexer,
ExponentialMovingWindowIndexer,
@@ -56,6 +58,7 @@
from pandas._typing import (
Axis,
TimedeltaConvertibleTypes,
+ npt,
)
from pandas import (
@@ -101,7 +104,7 @@ def get_center_of_mass(
def _calculate_deltas(
times: np.ndarray | NDFrame,
halflife: float | TimedeltaConvertibleTypes | None,
-) -> np.ndarray:
+) -> npt.NDArray[np.float64]:
"""
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
@@ -119,11 +122,11 @@ def _calculate_deltas(
np.ndarray
Diff of the times divided by the half-life
"""
+ unit = dtype_to_unit(times.dtype)
if isinstance(times, ABCSeries):
times = times._values
_times = np.asarray(times.view(np.int64), dtype=np.float64)
- # TODO: generalize to non-nano?
- _halflife = float(Timedelta(halflife).as_unit("ns")._value)
+ _halflife = float(Timedelta(halflife).as_unit(unit)._value)
return np.diff(_times) / _halflife
@@ -366,8 +369,12 @@ def __init__(
if self.times is not None:
if not self.adjust:
raise NotImplementedError("times is not supported with adjust=False.")
- if not is_datetime64_ns_dtype(self.times):
- raise ValueError("times must be datetime64[ns] dtype.")
+ times_dtype = getattr(self.times, "dtype", None)
+ if not (
+ is_datetime64_dtype(times_dtype)
+ or isinstance(times_dtype, DatetimeTZDtype)
+ ):
+ raise ValueError("times must be datetime64 dtype.")
if len(self.times) != len(obj):
raise ValueError("times must be the same length as the object.")
if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)):
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 427780db79783..058e5ce36e53e 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -60,7 +60,7 @@ def test_constructor(frame_or_series):
def test_ewma_times_not_datetime_type():
- msg = r"times must be datetime64\[ns\] dtype."
+ msg = r"times must be datetime64 dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
@@ -102,30 +102,6 @@ def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize(
- "unit",
- [
- pytest.param(
- "s",
- marks=pytest.mark.xfail(
- reason="ExponentialMovingWindow constructor raises on non-nano"
- ),
- ),
- pytest.param(
- "ms",
- marks=pytest.mark.xfail(
- reason="ExponentialMovingWindow constructor raises on non-nano"
- ),
- ),
- pytest.param(
- "us",
- marks=pytest.mark.xfail(
- reason="ExponentialMovingWindow constructor raises on non-nano"
- ),
- ),
- "ns",
- ],
-)
def test_ewma_with_times_variable_spacing(tz_aware_fixture, unit):
tz = tz_aware_fixture
halflife = "23 days"
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56262 | 2023-11-30T16:42:10Z | 2023-11-30T23:20:48Z | 2023-11-30T23:20:48Z | 2023-11-30T23:29:31Z |
TST: dt64 units | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index a635ac77566e1..4dc0d477f89e8 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -686,7 +686,8 @@ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif dtype.kind in "mM":
- return dtype.type("NaT", "ns")
+ unit = np.datetime_data(dtype)[0]
+ return dtype.type("NaT", unit)
elif dtype.kind == "f":
return np.nan
elif dtype.kind in "iu":
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index c989b3d26677c..20f0dcc816408 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -664,11 +664,8 @@ def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype):
def test_reset_index_empty_frame_with_datetime64_multiindex():
# https://github.com/pandas-dev/pandas/issues/35606
- idx = MultiIndex(
- levels=[[Timestamp("2020-07-20 00:00:00")], [3, 4]],
- codes=[[], []],
- names=["a", "b"],
- )
+ dti = pd.DatetimeIndex(["2020-07-20 00:00:00"], dtype="M8[ns]")
+ idx = MultiIndex.from_product([dti, [3, 4]], names=["a", "b"])[:0]
df = DataFrame(index=idx, columns=["c", "d"])
result = df.reset_index()
expected = DataFrame(
@@ -681,7 +678,8 @@ def test_reset_index_empty_frame_with_datetime64_multiindex():
def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby():
# https://github.com/pandas-dev/pandas/issues/35657
- df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": pd.to_datetime("2020-01-01")})
+ dti = pd.DatetimeIndex(["2020-01-01"], dtype="M8[ns]")
+ df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": dti})
df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum()
result = df.reset_index()
expected = DataFrame(
diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py
index bf335d154e186..edd7993056512 100644
--- a/pandas/tests/indexes/interval/test_formats.py
+++ b/pandas/tests/indexes/interval/test_formats.py
@@ -3,6 +3,7 @@
from pandas import (
DataFrame,
+ DatetimeIndex,
Index,
Interval,
IntervalIndex,
@@ -100,18 +101,14 @@ def test_get_values_for_csv(self, tuples, closed, expected_data):
expected = np.array(expected_data)
tm.assert_numpy_array_equal(result, expected)
- def test_timestamp_with_timezone(self):
+ def test_timestamp_with_timezone(self, unit):
# GH 55035
- index = IntervalIndex(
- [
- Interval(
- Timestamp("2020-01-01", tz="UTC"), Timestamp("2020-01-02", tz="UTC")
- )
- ]
- )
+ left = DatetimeIndex(["2020-01-01"], dtype=f"M8[{unit}, UTC]")
+ right = DatetimeIndex(["2020-01-02"], dtype=f"M8[{unit}, UTC]")
+ index = IntervalIndex.from_arrays(left, right)
result = repr(index)
expected = (
"IntervalIndex([(2020-01-01 00:00:00+00:00, 2020-01-02 00:00:00+00:00]], "
- "dtype='interval[datetime64[ns, UTC], right]')"
+ f"dtype='interval[datetime64[{unit}, UTC], right]')"
)
assert result == expected
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index a9e83f6ef87a3..fe9ad8de5765f 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from datetime import (
datetime,
time,
@@ -130,8 +132,15 @@ def df_ref(datapath):
return df_ref
-def adjust_expected(expected: DataFrame, read_ext: str) -> None:
+def get_exp_unit(read_ext: str, engine: str | None) -> str:
+ return "ns"
+
+
+def adjust_expected(expected: DataFrame, read_ext: str, engine: str) -> None:
expected.index.name = None
+ unit = get_exp_unit(read_ext, engine)
+ # error: "Index" has no attribute "as_unit"
+ expected.index = expected.index.as_unit(unit) # type: ignore[attr-defined]
def xfail_datetimes_with_pyxlsb(engine, request):
@@ -225,7 +234,7 @@ def test_usecols_list(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref[["B", "C"]]
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
df1 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3]
@@ -246,7 +255,7 @@ def test_usecols_str(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref[["A", "B", "C"]]
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D"
@@ -264,7 +273,7 @@ def test_usecols_str(self, request, engine, read_ext, df_ref):
tm.assert_frame_equal(df3, expected)
expected = df_ref[["B", "C"]]
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D"
@@ -302,7 +311,7 @@ def test_usecols_diff_positional_int_columns_order(
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref[["A", "C"]]
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols
@@ -321,7 +330,7 @@ def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(result, expected)
@@ -330,7 +339,7 @@ def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref[["C", "D"]]
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E"
@@ -428,7 +437,7 @@ def test_excel_table(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
df2 = pd.read_excel(
@@ -446,6 +455,7 @@ def test_excel_table(self, request, engine, read_ext, df_ref):
def test_reader_special_dtypes(self, request, engine, read_ext):
xfail_datetimes_with_pyxlsb(engine, request)
+ unit = get_exp_unit(read_ext, engine)
expected = DataFrame.from_dict(
{
"IntCol": [1, 2, -3, 4, 0],
@@ -453,13 +463,16 @@ def test_reader_special_dtypes(self, request, engine, read_ext):
"BoolCol": [True, False, True, True, False],
"StrCol": [1, 2, 3, 4, 5],
"Str2Col": ["a", 3, "c", "d", "e"],
- "DateCol": [
- datetime(2013, 10, 30),
- datetime(2013, 10, 31),
- datetime(1905, 1, 1),
- datetime(2013, 12, 14),
- datetime(2015, 3, 14),
- ],
+ "DateCol": Index(
+ [
+ datetime(2013, 10, 30),
+ datetime(2013, 10, 31),
+ datetime(1905, 1, 1),
+ datetime(2013, 12, 14),
+ datetime(2015, 3, 14),
+ ],
+ dtype=f"M8[{unit}]",
+ ),
},
)
basename = "test_types"
@@ -578,7 +591,7 @@ def test_reader_dtype_str(self, read_ext, dtype, expected):
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
- def test_dtype_backend(self, read_ext, dtype_backend):
+ def test_dtype_backend(self, read_ext, dtype_backend, engine):
# GH#36712
if read_ext in (".xlsb", ".xls"):
pytest.skip(f"No engine for filetype: '{read_ext}'")
@@ -621,6 +634,9 @@ def test_dtype_backend(self, read_ext, dtype_backend):
expected["j"] = ArrowExtensionArray(pa.array([None, None]))
else:
expected = df
+ unit = get_exp_unit(read_ext, engine)
+ expected["i"] = expected["i"].astype(f"M8[{unit}]")
+
tm.assert_frame_equal(result, expected)
def test_dtype_backend_and_dtype(self, read_ext):
@@ -812,7 +828,7 @@ def test_sheet_name(self, request, read_ext, engine, df_ref):
sheet_name = "Sheet1"
expected = df_ref
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
@@ -1010,6 +1026,8 @@ def test_read_excel_multiindex(self, request, engine, read_ext):
# see gh-4679
xfail_datetimes_with_pyxlsb(engine, request)
+ unit = get_exp_unit(read_ext, engine)
+
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
@@ -1023,6 +1041,7 @@ def test_read_excel_multiindex(self, request, engine, read_ext):
],
columns=mi,
)
+ expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")
actual = pd.read_excel(
mi_file, sheet_name="mi_column", header=[0, 1], index_col=0
@@ -1102,6 +1121,9 @@ def test_read_excel_multiindex_blank_after_name(
mi_file = "testmultiindex" + read_ext
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"])
+
+ unit = get_exp_unit(read_ext, engine)
+
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
@@ -1115,6 +1137,7 @@ def test_read_excel_multiindex_blank_after_name(
names=["ilvl1", "ilvl2"],
),
)
+ expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")
result = pd.read_excel(
mi_file,
sheet_name=sheet_name,
@@ -1218,6 +1241,8 @@ def test_read_excel_skiprows(self, request, engine, read_ext):
# GH 4903
xfail_datetimes_with_pyxlsb(engine, request)
+ unit = get_exp_unit(read_ext, engine)
+
actual = pd.read_excel(
"testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2]
)
@@ -1230,6 +1255,7 @@ def test_read_excel_skiprows(self, request, engine, read_ext):
],
columns=["a", "b", "c", "d"],
)
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
@@ -1262,11 +1288,13 @@ def test_read_excel_skiprows(self, request, engine, read_ext):
],
columns=["a", "b", "c", "d"],
)
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
tm.assert_frame_equal(actual, expected)
def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext):
# GH 4903
xfail_datetimes_with_pyxlsb(engine, request)
+ unit = get_exp_unit(read_ext, engine)
actual = pd.read_excel(
"testskiprows" + read_ext,
@@ -1282,6 +1310,7 @@ def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext):
],
columns=["a", "b", "c", "d"],
)
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, read_ext):
@@ -1538,7 +1567,7 @@ def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
@@ -1565,7 +1594,7 @@ def test_sheet_name(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
- adjust_expected(expected, read_ext)
+ adjust_expected(expected, read_ext, engine)
filename = "test1"
sheet_name = "Sheet1"
@@ -1657,11 +1686,14 @@ def test_read_datetime_multiindex(self, request, engine, read_ext):
f = "test_datetime_mi" + read_ext
with pd.ExcelFile(f) as excel:
actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)
- expected_column_index = MultiIndex.from_tuples(
- [(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))],
+
+ unit = get_exp_unit(read_ext, engine)
+ dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]")
+ expected_column_index = MultiIndex.from_arrays(
+ [dti[:1], dti[1:]],
names=[
- pd.to_datetime("02/29/2020").to_pydatetime(),
- pd.to_datetime("03/01/2020").to_pydatetime(),
+ dti[0].to_pydatetime(),
+ dti[1].to_pydatetime(),
],
)
expected = DataFrame([], index=[], columns=expected_column_index)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 74286a3ddd8ed..8452ec01a0936 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -35,6 +35,10 @@
from pandas.io.excel._util import _writers
+def get_exp_unit(path: str) -> str:
+ return "ns"
+
+
@pytest.fixture
def frame(float_frame):
"""
@@ -461,6 +465,7 @@ def test_mixed(self, frame, path):
tm.assert_frame_equal(mixed_frame, recons)
def test_ts_frame(self, path):
+ unit = get_exp_unit(path)
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
@@ -471,10 +476,13 @@ def test_ts_frame(self, path):
index = pd.DatetimeIndex(np.asarray(df.index), freq=None)
df.index = index
+ expected = df[:]
+ expected.index = expected.index.as_unit(unit)
+
df.to_excel(path, sheet_name="test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
- tm.assert_frame_equal(df, recons)
+ tm.assert_frame_equal(expected, recons)
def test_basics_with_nan(self, frame, path):
frame = frame.copy()
@@ -538,6 +546,7 @@ def test_inf_roundtrip(self, path):
def test_sheets(self, frame, path):
# freq doesn't round-trip
+ unit = get_exp_unit(path)
tsframe = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
@@ -546,6 +555,9 @@ def test_sheets(self, frame, path):
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
tsframe.index = index
+ expected = tsframe[:]
+ expected.index = expected.index.as_unit(unit)
+
frame = frame.copy()
frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
@@ -562,7 +574,7 @@ def test_sheets(self, frame, path):
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(frame, recons)
recons = pd.read_excel(reader, sheet_name="test2", index_col=0)
- tm.assert_frame_equal(tsframe, recons)
+ tm.assert_frame_equal(expected, recons)
assert 2 == len(reader.sheet_names)
assert "test1" == reader.sheet_names[0]
assert "test2" == reader.sheet_names[1]
@@ -660,6 +672,7 @@ def test_excel_roundtrip_indexname(self, merge_cells, path):
def test_excel_roundtrip_datetime(self, merge_cells, path):
# datetime.date, not sure what to test here exactly
+ unit = get_exp_unit(path)
# freq does not round-trip
tsframe = DataFrame(
@@ -678,12 +691,16 @@ def test_excel_roundtrip_datetime(self, merge_cells, path):
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
- tm.assert_frame_equal(tsframe, recons)
+ expected = tsframe[:]
+ expected.index = expected.index.as_unit(unit)
+ tm.assert_frame_equal(expected, recons)
def test_excel_date_datetime_format(self, ext, path):
# see gh-4133
#
# Excel output format strings
+ unit = get_exp_unit(path)
+
df = DataFrame(
[
[date(2014, 1, 31), date(1999, 9, 24)],
@@ -700,6 +717,7 @@ def test_excel_date_datetime_format(self, ext, path):
index=["DATE", "DATETIME"],
columns=["X", "Y"],
)
+ df_expected = df_expected.astype(f"M8[{unit}]")
with tm.ensure_clean(ext) as filename2:
with ExcelWriter(path) as writer1:
@@ -854,15 +872,20 @@ def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
def test_to_excel_multiindex_dates(self, merge_cells, path):
# try multiindex with dates
+ unit = get_exp_unit(path)
tsframe = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=5, freq="B"),
)
- new_index = [tsframe.index, np.arange(len(tsframe.index), dtype=np.int64)]
- tsframe.index = MultiIndex.from_arrays(new_index)
+ tsframe.index = MultiIndex.from_arrays(
+ [
+ tsframe.index.as_unit(unit),
+ np.arange(len(tsframe.index), dtype=np.int64),
+ ],
+ names=["time", "foo"],
+ )
- tsframe.index.names = ["time", "foo"]
tsframe.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
@@ -1147,6 +1170,7 @@ def test_comment_empty_line(self, path):
def test_datetimes(self, path):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
+ unit = get_exp_unit(path)
datetimes = [
datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
@@ -1165,7 +1189,8 @@ def test_datetimes(self, path):
write_frame.to_excel(path, sheet_name="Sheet1")
read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0)
- tm.assert_series_equal(write_frame["A"], read_frame["A"])
+ expected = write_frame.astype(f"M8[{unit}]")
+ tm.assert_series_equal(expected["A"], read_frame["A"])
def test_bytes_io(self, engine):
# see gh-7074
| Aimed at trimming the diff in #55901 | https://api.github.com/repos/pandas-dev/pandas/pulls/56261 | 2023-11-30T16:31:55Z | 2023-12-04T19:11:54Z | 2023-12-04T19:11:54Z | 2023-12-04T20:09:23Z |
TYP: overload tz_to_dtype | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index de5832ba31b70..496a6987c3264 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -8,6 +8,7 @@
from typing import (
TYPE_CHECKING,
cast,
+ overload,
)
import warnings
@@ -93,6 +94,16 @@
_ITER_CHUNKSIZE = 10_000
+@overload
+def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype:
+ ...
+
+
+@overload
+def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]:
+ ...
+
+
def tz_to_dtype(
tz: tzinfo | None, unit: str = "ns"
) -> np.dtype[np.datetime64] | DatetimeTZDtype:
| I'm getting bunches of bogus mypy failures on main locally, so will need the CI to tell me if this is OK. | https://api.github.com/repos/pandas-dev/pandas/pulls/56260 | 2023-11-30T16:20:12Z | 2023-11-30T17:28:11Z | 2023-11-30T17:28:11Z | 2023-11-30T21:59:01Z |
Support e.g. modern MacBooks running Linux containers | diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 67c6f9bedd9c9..3a764866eb1f8 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -93,7 +93,9 @@ jobs:
# https://github.com/github/feedback/discussions/7835#discussioncomment-1769026
buildplat:
- [ubuntu-22.04, manylinux_x86_64]
+ - [ubuntu-22.04, manylinux_aarch64]
- [ubuntu-22.04, musllinux_x86_64]
+ - [ubuntu-22.04, musllinux_aarch64]
- [macos-12, macosx_*]
- [windows-2022, win_amd64]
# TODO: support PyPy?
| Modern MacBooks have aarch64 CPUs. When using containers (or virtualized Linux in general), they now have to build Pandas from source if installing from the PyPI index. This costs a lot of time and complexity that is better dealt with in the Pandas product build environment.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56258 | 2023-11-30T13:15:02Z | 2024-01-01T00:39:20Z | null | 2024-01-02T07:54:42Z |
Less C89, most const, static funcs | diff --git a/pandas/_libs/src/vendored/ujson/python/objToJSON.c b/pandas/_libs/src/vendored/ujson/python/objToJSON.c
index 89f101964aff7..6271791fe201e 100644
--- a/pandas/_libs/src/vendored/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/vendored/ujson/python/objToJSON.c
@@ -146,12 +146,10 @@ typedef struct __PyObjectEncoder {
enum PANDAS_FORMAT { SPLIT, RECORDS, INDEX, COLUMNS, VALUES };
-int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
+static int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
static TypeContext *createTypeContext(void) {
- TypeContext *pc;
-
- pc = PyObject_Malloc(sizeof(TypeContext));
+ TypeContext *pc = PyObject_Malloc(sizeof(TypeContext));
if (!pc) {
PyErr_NoMemory();
return NULL;
@@ -235,12 +233,10 @@ static PyObject *get_values(PyObject *obj) {
static PyObject *get_sub_attr(PyObject *obj, char *attr, char *subAttr) {
PyObject *tmp = PyObject_GetAttrString(obj, attr);
- PyObject *ret;
-
if (tmp == 0) {
return 0;
}
- ret = PyObject_GetAttrString(tmp, subAttr);
+ PyObject *ret = PyObject_GetAttrString(tmp, subAttr);
Py_DECREF(tmp);
return ret;
@@ -248,12 +244,10 @@ static PyObject *get_sub_attr(PyObject *obj, char *attr, char *subAttr) {
static Py_ssize_t get_attr_length(PyObject *obj, char *attr) {
PyObject *tmp = PyObject_GetAttrString(obj, attr);
- Py_ssize_t ret;
-
if (tmp == 0) {
return 0;
}
- ret = PyObject_Length(tmp);
+ Py_ssize_t ret = PyObject_Length(tmp);
Py_DECREF(tmp);
if (ret == -1) {
@@ -266,9 +260,8 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) {
static npy_int64 get_long_attr(PyObject *o, const char *attr) {
// NB we are implicitly assuming that o is a Timedelta or Timestamp, or NaT
- npy_int64 long_val;
PyObject *value = PyObject_GetAttrString(o, attr);
- long_val =
+ const npy_int64 long_val =
(PyLong_Check(value) ? PyLong_AsLongLong(value) : PyLong_AsLong(value));
Py_DECREF(value);
@@ -293,20 +286,19 @@ static npy_int64 get_long_attr(PyObject *o, const char *attr) {
}
if (cReso == NPY_FR_us) {
- long_val = long_val * 1000L;
+ return long_val * 1000L;
} else if (cReso == NPY_FR_ms) {
- long_val = long_val * 1000000L;
+ return long_val * 1000000L;
} else if (cReso == NPY_FR_s) {
- long_val = long_val * 1000000000L;
+ return long_val * 1000000000L;
}
return long_val;
}
static npy_float64 total_seconds(PyObject *td) {
- npy_float64 double_val;
PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL);
- double_val = PyFloat_AS_DOUBLE(value);
+ const npy_float64 double_val = PyFloat_AS_DOUBLE(value);
Py_DECREF(value);
return double_val;
}
@@ -361,10 +353,7 @@ static char *PyDateTimeToIsoCallback(JSOBJ obj, JSONTypeContext *tc,
static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) {
PyObject *obj = (PyObject *)_obj;
- PyObject *str;
- PyObject *tmp;
-
- str = PyObject_CallMethod(obj, "isoformat", NULL);
+ PyObject *str = PyObject_CallMethod(obj, "isoformat", NULL);
if (str == NULL) {
*outLen = 0;
if (!PyErr_Occurred()) {
@@ -374,7 +363,7 @@ static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) {
return NULL;
}
if (PyUnicode_Check(str)) {
- tmp = str;
+ PyObject *tmp = str;
str = PyUnicode_AsUTF8String(str);
Py_DECREF(tmp);
}
@@ -398,21 +387,16 @@ static void NpyArr_freeItemValue(JSOBJ Py_UNUSED(_obj), JSONTypeContext *tc) {
}
}
-int NpyArr_iterNextNone(JSOBJ Py_UNUSED(_obj), JSONTypeContext *Py_UNUSED(tc)) {
+static int NpyArr_iterNextNone(JSOBJ Py_UNUSED(_obj),
+ JSONTypeContext *Py_UNUSED(tc)) {
return 0;
}
-void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
- PyArrayObject *obj;
- NpyArrContext *npyarr;
-
- if (GET_TC(tc)->newObj) {
- obj = (PyArrayObject *)GET_TC(tc)->newObj;
- } else {
- obj = (PyArrayObject *)_obj;
- }
+static void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
+ PyArrayObject *obj =
+ (PyArrayObject *)(GET_TC(tc)->newObj ? GET_TC(tc)->newObj : _obj);
- npyarr = PyObject_Malloc(sizeof(NpyArrContext));
+ NpyArrContext *npyarr = PyObject_Malloc(sizeof(NpyArrContext));
GET_TC(tc)->npyarr = npyarr;
if (!npyarr) {
@@ -446,7 +430,7 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
npyarr->rowLabels = GET_TC(tc)->rowLabels;
}
-void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
+static void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
if (npyarr) {
@@ -455,10 +439,10 @@ void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
}
}
-void NpyArrPassThru_iterBegin(JSOBJ Py_UNUSED(obj),
- JSONTypeContext *Py_UNUSED(tc)) {}
+static void NpyArrPassThru_iterBegin(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc)) {}
-void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
+static void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
// finished this dimension, reset the data pointer
npyarr->curdim--;
@@ -471,7 +455,7 @@ void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
NpyArr_freeItemValue(obj, tc);
}
-int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
+static int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
if (PyErr_Occurred()) {
@@ -503,7 +487,7 @@ int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
+static int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
if (PyErr_Occurred()) {
@@ -531,21 +515,20 @@ int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
return 1;
}
-JSOBJ NpyArr_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ NpyArr_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
- npy_intp idx;
char *cStr;
if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) {
- idx = npyarr->index[npyarr->stridedim] - 1;
+ const npy_intp idx = npyarr->index[npyarr->stridedim] - 1;
cStr = npyarr->columnLabels[idx];
} else {
- idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
+ const npy_intp idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
cStr = npyarr->rowLabels[idx];
}
@@ -563,7 +546,7 @@ char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
// Uses a dedicated NpyArrContext for each column.
//=============================================================================
-void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
+static void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
if (blkCtxt->transpose) {
@@ -575,7 +558,7 @@ void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
NpyArr_freeItemValue(obj, tc);
}
-int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
+static int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
if (blkCtxt->colIdx >= blkCtxt->ncols) {
@@ -587,20 +570,20 @@ int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
return NpyArr_iterNextItem(obj, tc);
}
-char *PdBlock_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *PdBlock_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
NpyArrContext *npyarr = blkCtxt->npyCtxts[0];
- npy_intp idx;
char *cStr;
if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) {
- idx = blkCtxt->colIdx - 1;
+ const npy_intp idx = blkCtxt->colIdx - 1;
cStr = npyarr->columnLabels[idx];
} else {
- idx = GET_TC(tc)->iterNext != PdBlock_iterNext
- ? npyarr->index[npyarr->stridedim - npyarr->inc] - 1
- : npyarr->index[npyarr->stridedim];
+ const npy_intp idx =
+ GET_TC(tc)->iterNext != PdBlock_iterNext
+ ? npyarr->index[npyarr->stridedim - npyarr->inc] - 1
+ : npyarr->index[npyarr->stridedim];
cStr = npyarr->rowLabels[idx];
}
@@ -609,18 +592,18 @@ char *PdBlock_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
return cStr;
}
-char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *tc,
+ size_t *outLen) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
NpyArrContext *npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx];
- npy_intp idx;
char *cStr;
if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) {
- idx = npyarr->index[npyarr->stridedim] - 1;
+ const npy_intp idx = npyarr->index[npyarr->stridedim] - 1;
cStr = npyarr->columnLabels[idx];
} else {
- idx = blkCtxt->colIdx;
+ const npy_intp idx = blkCtxt->colIdx;
cStr = npyarr->rowLabels[idx];
}
@@ -628,9 +611,8 @@ char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
return cStr;
}
-int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) {
+static int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
- NpyArrContext *npyarr;
if (PyErr_Occurred() || ((JSONObjectEncoder *)tc->encoder)->errorMsg) {
return 0;
@@ -641,7 +623,7 @@ int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 0;
}
} else {
- npyarr = blkCtxt->npyCtxts[0];
+ const NpyArrContext *npyarr = blkCtxt->npyCtxts[0];
if (npyarr->index[npyarr->stridedim] >= npyarr->dim) {
return 0;
}
@@ -653,7 +635,8 @@ int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-void PdBlockPassThru_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void PdBlockPassThru_iterBegin(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *tc) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
if (blkCtxt->transpose) {
@@ -664,19 +647,14 @@ void PdBlockPassThru_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
- PyObject *obj, *values, *arrays, *array;
- PdBlockContext *blkCtxt;
- NpyArrContext *npyarr;
- Py_ssize_t i;
-
- obj = (PyObject *)_obj;
+static void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
+ PyObject *obj = (PyObject *)_obj;
GET_TC(tc)->iterGetName = GET_TC(tc)->transpose
? PdBlock_iterGetName_Transpose
: PdBlock_iterGetName;
- blkCtxt = PyObject_Malloc(sizeof(PdBlockContext));
+ PdBlockContext *blkCtxt = PyObject_Malloc(sizeof(PdBlockContext));
if (!blkCtxt) {
PyErr_NoMemory();
GET_TC(tc)->iterNext = NpyArr_iterNextNone;
@@ -702,21 +680,21 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
return;
}
- arrays = get_sub_attr(obj, "_mgr", "column_arrays");
+ PyObject *arrays = get_sub_attr(obj, "_mgr", "column_arrays");
if (!arrays) {
GET_TC(tc)->iterNext = NpyArr_iterNextNone;
return;
}
- for (i = 0; i < PyObject_Length(arrays); i++) {
- array = PyList_GET_ITEM(arrays, i);
+ for (Py_ssize_t i = 0; i < PyObject_Length(arrays); i++) {
+ PyObject *array = PyList_GET_ITEM(arrays, i);
if (!array) {
GET_TC(tc)->iterNext = NpyArr_iterNextNone;
goto ARR_RET;
}
// ensure we have a numpy array (i.e. np.asarray)
- values = PyObject_CallMethod(array, "__array__", NULL);
+ PyObject *values = PyObject_CallMethod(array, "__array__", NULL);
if ((!values) || (!PyArray_CheckExact(values))) {
// Didn't get a numpy array
((JSONObjectEncoder *)tc->encoder)->errorMsg = "";
@@ -728,12 +706,11 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
// init a dedicated context for this column
NpyArr_iterBegin(obj, tc);
- npyarr = GET_TC(tc)->npyarr;
GET_TC(tc)->itemValue = NULL;
((PyObjectEncoder *)tc->encoder)->npyCtxtPassthru = NULL;
- blkCtxt->npyCtxts[i] = npyarr;
+ blkCtxt->npyCtxts[i] = GET_TC(tc)->npyarr;
GET_TC(tc)->newObj = NULL;
}
GET_TC(tc)->npyarr = blkCtxt->npyCtxts[0];
@@ -743,18 +720,13 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
Py_DECREF(arrays);
}
-void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
- PdBlockContext *blkCtxt;
- NpyArrContext *npyarr;
- int i;
-
+static void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->itemValue = NULL;
- npyarr = GET_TC(tc)->npyarr;
-
- blkCtxt = GET_TC(tc)->pdblock;
+ NpyArrContext *npyarr = GET_TC(tc)->npyarr;
+ PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
if (blkCtxt) {
- for (i = 0; i < blkCtxt->ncols; i++) {
+ for (int i = 0; i < blkCtxt->ncols; i++) {
npyarr = blkCtxt->npyCtxts[i];
if (npyarr) {
if (npyarr->array) {
@@ -780,34 +752,35 @@ void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
// Tuple iteration functions
// itemValue is borrowed reference, no ref counting
//=============================================================================
-void Tuple_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+static void Tuple_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->index = 0;
GET_TC(tc)->size = PyTuple_GET_SIZE((PyObject *)obj);
GET_TC(tc)->itemValue = NULL;
}
-int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc) {
- PyObject *item;
+static int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc) {
if (GET_TC(tc)->index >= GET_TC(tc)->size) {
return 0;
}
- item = PyTuple_GET_ITEM(obj, GET_TC(tc)->index);
+ PyObject *item = PyTuple_GET_ITEM(obj, GET_TC(tc)->index);
GET_TC(tc)->itemValue = item;
GET_TC(tc)->index++;
return 1;
}
-void Tuple_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {}
+static void Tuple_iterEnd(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc)) {}
-JSOBJ Tuple_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Tuple_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Tuple_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
- size_t *Py_UNUSED(outLen)) {
+static char *Tuple_iterGetName(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc),
+ size_t *Py_UNUSED(outLen)) {
return NULL;
}
@@ -815,20 +788,18 @@ char *Tuple_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
// Set iteration functions
// itemValue is borrowed reference, no ref counting
//=============================================================================
-void Set_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+static void Set_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->itemValue = NULL;
GET_TC(tc)->iterator = PyObject_GetIter(obj);
}
-int Set_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
- PyObject *item;
-
+static int Set_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemValue) {
Py_DECREF(GET_TC(tc)->itemValue);
GET_TC(tc)->itemValue = NULL;
}
- item = PyIter_Next(GET_TC(tc)->iterator);
+ PyObject *item = PyIter_Next(GET_TC(tc)->iterator);
if (item == NULL) {
return 0;
@@ -838,7 +809,7 @@ int Set_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return 1;
}
-void Set_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Set_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemValue) {
Py_DECREF(GET_TC(tc)->itemValue);
GET_TC(tc)->itemValue = NULL;
@@ -850,12 +821,13 @@ void Set_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-JSOBJ Set_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Set_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Set_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
- size_t *Py_UNUSED(outLen)) {
+static char *Set_iterGetName(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc),
+ size_t *Py_UNUSED(outLen)) {
return NULL;
}
@@ -864,13 +836,13 @@ char *Set_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
// itemName ref is borrowed from PyObject_Dir (attrList). No refcount
// itemValue ref is from PyObject_GetAttr. Ref counted
//=============================================================================
-void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+static void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->attrList = PyObject_Dir(obj);
GET_TC(tc)->index = 0;
GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList);
}
-void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemValue) {
Py_DECREF(GET_TC(tc)->itemValue);
GET_TC(tc)->itemValue = NULL;
@@ -884,13 +856,10 @@ void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
Py_DECREF((PyObject *)GET_TC(tc)->attrList);
}
-int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
+static int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
PyObject *obj = (PyObject *)_obj;
PyObject *itemValue = GET_TC(tc)->itemValue;
PyObject *itemName = GET_TC(tc)->itemName;
- PyObject *attr;
- PyObject *attrName;
- char *attrStr;
if (PyErr_Occurred() || ((JSONObjectEncoder *)tc->encoder)->errorMsg) {
return 0;
@@ -907,9 +876,10 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
}
for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index++) {
- attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index);
- attr = PyUnicode_AsUTF8String(attrName);
- attrStr = PyBytes_AS_STRING(attr);
+ PyObject *attrName =
+ PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index);
+ PyObject *attr = PyUnicode_AsUTF8String(attrName);
+ const char *attrStr = PyBytes_AS_STRING(attr);
if (attrStr[0] == '_') {
Py_DECREF(attr);
@@ -949,12 +919,12 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) {
return 1;
}
-JSOBJ Dir_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Dir_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Dir_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *Dir_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
*outLen = PyBytes_GET_SIZE(GET_TC(tc)->itemName);
return PyBytes_AS_STRING(GET_TC(tc)->itemName);
}
@@ -963,12 +933,12 @@ char *Dir_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
// List iteration functions
// itemValue is borrowed from object (which is list). No refcounting
//=============================================================================
-void List_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+static void List_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->index = 0;
GET_TC(tc)->size = PyList_GET_SIZE((PyObject *)obj);
}
-int List_iterNext(JSOBJ obj, JSONTypeContext *tc) {
+static int List_iterNext(JSOBJ obj, JSONTypeContext *tc) {
if (GET_TC(tc)->index >= GET_TC(tc)->size) {
return 0;
}
@@ -978,21 +948,23 @@ int List_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-void List_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {}
+static void List_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {
+}
-JSOBJ List_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ List_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *List_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
- size_t *Py_UNUSED(outLen)) {
+static char *List_iterGetName(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc),
+ size_t *Py_UNUSED(outLen)) {
return NULL;
}
//=============================================================================
// pandas Index iteration functions
//=============================================================================
-void Index_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Index_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
GET_TC(tc)->index = 0;
GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char));
if (!GET_TC(tc)->cStr) {
@@ -1000,13 +972,12 @@ void Index_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) {
- Py_ssize_t index;
+static int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) {
if (!GET_TC(tc)->cStr) {
return 0;
}
- index = GET_TC(tc)->index;
+ const Py_ssize_t index = GET_TC(tc)->index;
Py_XDECREF(GET_TC(tc)->itemValue);
if (index == 0) {
memcpy(GET_TC(tc)->cStr, "name", sizeof(char) * 5);
@@ -1025,14 +996,15 @@ int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-void Index_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {}
+static void Index_iterEnd(JSOBJ Py_UNUSED(obj),
+ JSONTypeContext *Py_UNUSED(tc)) {}
-JSOBJ Index_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Index_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Index_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *Index_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
*outLen = strlen(GET_TC(tc)->cStr);
return GET_TC(tc)->cStr;
}
@@ -1040,7 +1012,7 @@ char *Index_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
//=============================================================================
// pandas Series iteration functions
//=============================================================================
-void Series_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Series_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
GET_TC(tc)->index = 0;
GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char));
@@ -1050,13 +1022,12 @@ void Series_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) {
- Py_ssize_t index;
+static int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) {
if (!GET_TC(tc)->cStr) {
return 0;
}
- index = GET_TC(tc)->index;
+ const Py_ssize_t index = GET_TC(tc)->index;
Py_XDECREF(GET_TC(tc)->itemValue);
if (index == 0) {
memcpy(GET_TC(tc)->cStr, "name", sizeof(char) * 5);
@@ -1078,17 +1049,17 @@ int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-void Series_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Series_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
enc->outputFormat = enc->originalOutputFormat;
}
-JSOBJ Series_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Series_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Series_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *Series_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
*outLen = strlen(GET_TC(tc)->cStr);
return GET_TC(tc)->cStr;
}
@@ -1096,7 +1067,7 @@ char *Series_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
//=============================================================================
// pandas DataFrame iteration functions
//=============================================================================
-void DataFrame_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void DataFrame_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
GET_TC(tc)->index = 0;
GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char));
@@ -1106,13 +1077,12 @@ void DataFrame_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) {
- Py_ssize_t index;
+static int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) {
if (!GET_TC(tc)->cStr) {
return 0;
}
- index = GET_TC(tc)->index;
+ const Py_ssize_t index = GET_TC(tc)->index;
Py_XDECREF(GET_TC(tc)->itemValue);
if (index == 0) {
memcpy(GET_TC(tc)->cStr, "columns", sizeof(char) * 8);
@@ -1132,17 +1102,17 @@ int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return 1;
}
-void DataFrame_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void DataFrame_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
enc->outputFormat = enc->originalOutputFormat;
}
-JSOBJ DataFrame_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ DataFrame_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *DataFrame_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *DataFrame_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
*outLen = strlen(GET_TC(tc)->cStr);
return GET_TC(tc)->cStr;
}
@@ -1152,13 +1122,11 @@ char *DataFrame_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
// itemName might converted to string (Python_Str). Do refCounting
// itemValue is borrowed from object (which is dict). No refCounting
//=============================================================================
-void Dict_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Dict_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
GET_TC(tc)->index = 0;
}
-int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
- PyObject *itemNameTmp;
-
+static int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemName) {
Py_DECREF(GET_TC(tc)->itemName);
GET_TC(tc)->itemName = NULL;
@@ -1173,7 +1141,7 @@ int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
GET_TC(tc)->itemName = PyUnicode_AsUTF8String(GET_TC(tc)->itemName);
} else if (!PyBytes_Check(GET_TC(tc)->itemName)) {
GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName);
- itemNameTmp = GET_TC(tc)->itemName;
+ PyObject *itemNameTmp = GET_TC(tc)->itemName;
GET_TC(tc)->itemName = PyUnicode_AsUTF8String(GET_TC(tc)->itemName);
Py_DECREF(itemNameTmp);
} else {
@@ -1182,7 +1150,7 @@ int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return 1;
}
-void Dict_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Dict_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemName) {
Py_DECREF(GET_TC(tc)->itemName);
GET_TC(tc)->itemName = NULL;
@@ -1190,21 +1158,19 @@ void Dict_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
Py_DECREF(GET_TC(tc)->dictObj);
}
-JSOBJ Dict_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSOBJ Dict_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Dict_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
- size_t *outLen) {
+static char *Dict_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc,
+ size_t *outLen) {
*outLen = PyBytes_GET_SIZE(GET_TC(tc)->itemName);
return PyBytes_AS_STRING(GET_TC(tc)->itemName);
}
-void NpyArr_freeLabels(char **labels, npy_intp len) {
- npy_intp i;
-
+static void NpyArr_freeLabels(char **labels, npy_intp len) {
if (labels) {
- for (i = 0; i < len; i++) {
+ for (npy_intp i = 0; i < len; i++) {
PyObject_Free(labels[i]);
}
PyObject_Free(labels);
@@ -1228,17 +1194,11 @@ void NpyArr_freeLabels(char **labels, npy_intp len) {
* this has instead just stringified any input save for datetime values,
* which may need to be represented in various formats.
*/
-char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
- npy_intp num) {
+static char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
+ npy_intp num) {
// NOTE this function steals a reference to labels.
PyObject *item = NULL;
- size_t len;
- npy_intp i, stride;
- char **ret;
- char *dataptr, *cLabel;
- int type_num;
- PyArray_Descr *dtype;
- NPY_DATETIMEUNIT base = enc->datetimeUnit;
+ const NPY_DATETIMEUNIT base = enc->datetimeUnit;
if (!labels) {
return 0;
@@ -1251,23 +1211,23 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
return 0;
}
- ret = PyObject_Malloc(sizeof(char *) * num);
+ char **ret = PyObject_Malloc(sizeof(char *) * num);
if (!ret) {
PyErr_NoMemory();
Py_DECREF(labels);
return 0;
}
- for (i = 0; i < num; i++) {
+ for (npy_intp i = 0; i < num; i++) {
ret[i] = NULL;
}
- stride = PyArray_STRIDE(labels, 0);
- dataptr = PyArray_DATA(labels);
- type_num = PyArray_TYPE(labels);
- dtype = PyArray_DESCR(labels);
+ const npy_intp stride = PyArray_STRIDE(labels, 0);
+ char *dataptr = PyArray_DATA(labels);
+ const int type_num = PyArray_TYPE(labels);
+ PyArray_Descr *dtype = PyArray_DESCR(labels);
- for (i = 0; i < num; i++) {
+ for (npy_intp i = 0; i < num; i++) {
item = PyArray_GETITEM(labels, dataptr);
if (!item) {
NpyArr_freeLabels(ret, num);
@@ -1298,6 +1258,8 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
}
}
+ size_t len;
+ char *cLabel;
if (is_datetimelike) {
if (i8date == get_nat()) {
len = 4;
@@ -1370,7 +1332,7 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
return ret;
}
-void Object_invokeDefaultHandler(PyObject *obj, PyObjectEncoder *enc) {
+static void Object_invokeDefaultHandler(PyObject *obj, PyObjectEncoder *enc) {
PyObject *tmpObj = NULL;
tmpObj = PyObject_CallFunctionObjArgs(enc->defaultHandler, obj, NULL);
if (!PyErr_Occurred()) {
@@ -1384,14 +1346,7 @@ void Object_invokeDefaultHandler(PyObject *obj, PyObjectEncoder *enc) {
return;
}
-void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
- PyObject *obj, *exc, *toDictFunc, *tmpObj, *values;
- TypeContext *pc;
- PyObjectEncoder *enc;
- double val;
- npy_int64 value;
- int unit;
-
+static void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->prv = NULL;
if (!_obj) {
@@ -1399,8 +1354,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
}
- obj = (PyObject *)_obj;
- enc = (PyObjectEncoder *)tc->encoder;
+ PyObject *obj = (PyObject *)_obj;
+ PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
if (PyBool_Check(obj)) {
tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE;
@@ -1410,7 +1365,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
}
- pc = createTypeContext();
+ TypeContext *pc = createTypeContext();
if (!pc) {
tc->type = JT_INVALID;
return;
@@ -1418,9 +1373,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->prv = pc;
if (PyTypeNum_ISDATETIME(enc->npyType)) {
- int64_t longVal;
-
- longVal = *(npy_int64 *)enc->npyValue;
+ const int64_t longVal = *(npy_int64 *)enc->npyValue;
if (longVal == get_nat()) {
tc->type = JT_NULL;
} else {
@@ -1468,7 +1421,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
} else if (PyFloat_Check(obj)) {
- val = PyFloat_AS_DOUBLE(obj);
+ const double val = PyFloat_AS_DOUBLE(obj);
if (npy_isnan(val) || npy_isinf(val)) {
tc->type = JT_NULL;
} else {
@@ -1535,12 +1488,10 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
}
return;
} else if (PyDelta_Check(obj)) {
- if (PyObject_HasAttrString(obj, "_value")) {
- // pd.Timedelta object or pd.NaT
- value = get_long_attr(obj, "_value");
- } else {
- value = total_seconds(obj) * 1000000000LL; // nanoseconds per sec
- }
+ npy_int64 value =
+ PyObject_HasAttrString(obj, "_value") ? get_long_attr(obj, "_value")
+ : // pd.Timedelta object or pd.NaT
+ total_seconds(obj) * 1000000000LL; // nanoseconds per sec
if (value == get_nat()) {
tc->type = JT_NULL;
@@ -1549,14 +1500,12 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
pc->PyTypeToUTF8 = NpyTimeDeltaToIsoCallback;
tc->type = JT_UTF8;
} else {
- unit = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
+ const int unit = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
if (scaleNanosecToUnit(&value, unit) != 0) {
// TODO(username): Add some kind of error handling here
}
- exc = PyErr_Occurred();
-
- if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) {
+ if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_OverflowError)) {
goto INVALID;
}
@@ -1569,9 +1518,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
PyArray_CastScalarToCtype(obj, &(pc->longValue),
PyArray_DescrFromType(NPY_INT64));
- exc = PyErr_Occurred();
-
- if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) {
+ if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_OverflowError)) {
goto INVALID;
}
@@ -1640,11 +1587,11 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) {
tc->type = JT_OBJECT;
- tmpObj = PyObject_GetAttrString(obj, "index");
+ PyObject *tmpObj = PyObject_GetAttrString(obj, "index");
if (!tmpObj) {
goto INVALID;
}
- values = get_values(tmpObj);
+ PyObject *values = get_values(tmpObj);
Py_DECREF(tmpObj);
if (!values) {
goto INVALID;
@@ -1722,11 +1669,11 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->type = JT_ARRAY;
} else if (enc->outputFormat == RECORDS) {
tc->type = JT_ARRAY;
- tmpObj = PyObject_GetAttrString(obj, "columns");
+ PyObject *tmpObj = PyObject_GetAttrString(obj, "columns");
if (!tmpObj) {
goto INVALID;
}
- values = get_values(tmpObj);
+ PyObject *values = get_values(tmpObj);
if (!values) {
Py_DECREF(tmpObj);
goto INVALID;
@@ -1740,13 +1687,13 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
}
} else if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) {
tc->type = JT_OBJECT;
- tmpObj =
+ PyObject *tmpObj =
(enc->outputFormat == INDEX ? PyObject_GetAttrString(obj, "index")
: PyObject_GetAttrString(obj, "columns"));
if (!tmpObj) {
goto INVALID;
}
- values = get_values(tmpObj);
+ PyObject *values = get_values(tmpObj);
if (!values) {
Py_DECREF(tmpObj);
goto INVALID;
@@ -1824,7 +1771,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
}
- toDictFunc = PyObject_GetAttrString(obj, "toDict");
+ PyObject *toDictFunc = PyObject_GetAttrString(obj, "toDict");
if (toDictFunc) {
PyObject *tuple = PyTuple_New(0);
@@ -1876,7 +1823,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
}
-void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (tc->prv) {
Py_XDECREF(GET_TC(tc)->newObj);
GET_TC(tc)->newObj = NULL;
@@ -1891,21 +1838,21 @@ void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc,
- size_t *_outLen) {
+static const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc,
+ size_t *_outLen) {
return GET_TC(tc)->PyTypeToUTF8(obj, tc, _outLen);
}
-JSINT64 Object_getLongValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static JSINT64 Object_getLongValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->longValue;
}
-double Object_getDoubleValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+static double Object_getDoubleValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->doubleValue;
}
-const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc,
- size_t *_outLen) {
+static const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc,
+ size_t *_outLen) {
PyObject *repr = PyObject_Str(obj);
const char *str = PyUnicode_AsUTF8AndSize(repr, (Py_ssize_t *)_outLen);
char *bytes = PyObject_Malloc(*_outLen + 1);
@@ -1919,23 +1866,24 @@ const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc,
static void Object_releaseObject(JSOBJ _obj) { Py_DECREF((PyObject *)_obj); }
-void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+static void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->iterBegin(obj, tc);
}
-int Object_iterNext(JSOBJ obj, JSONTypeContext *tc) {
+static int Object_iterNext(JSOBJ obj, JSONTypeContext *tc) {
return GET_TC(tc)->iterNext(obj, tc);
}
-void Object_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
+static void Object_iterEnd(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->iterEnd(obj, tc);
}
-JSOBJ Object_iterGetValue(JSOBJ obj, JSONTypeContext *tc) {
+static JSOBJ Object_iterGetValue(JSOBJ obj, JSONTypeContext *tc) {
return GET_TC(tc)->iterGetValue(obj, tc);
}
-char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) {
+static char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc,
+ size_t *outLen) {
return GET_TC(tc)->iterGetName(obj, tc, outLen);
}
@@ -1962,9 +1910,6 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
"indent",
NULL};
- char buffer[65536];
- char *ret;
- PyObject *newobj;
PyObject *oinput = NULL;
PyObject *oensureAscii = NULL;
int idoublePrecision = 10; // default double precision setting
@@ -1994,9 +1939,9 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
PyObject_Free,
-1, // recursionMax
idoublePrecision,
- 1, // forceAscii
- 0, // encodeHTMLChars
- 0, // indent
+ 1, // forceAscii
+ 0, // encodeHTMLChars
+ indent, // indent
}};
JSONObjectEncoder *encoder = (JSONObjectEncoder *)&pyEncoder;
@@ -2080,7 +2025,9 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
encoder->indent = indent;
pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
- ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer));
+
+ char buffer[65536];
+ char *ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer));
if (PyErr_Occurred()) {
return NULL;
}
@@ -2093,7 +2040,7 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
return NULL;
}
- newobj = PyUnicode_FromString(ret);
+ PyObject *newobj = PyUnicode_FromString(ret);
if (ret != buffer) {
encoder->free(ret);
| Will have to do this in a few passes for other files, but generally we:
1. Are removing C89 style variable declarations at the top of functions
2. Adding `const` qualifiers where we can
3. Making more functions static that aren't intended to be linked outside of their translation unit
Point 1 helps with code readability, Point 2 can help with code safety and compiler optimization. Point 3 helps with code structure and makes functions "local" to their translaction unit instead of "global" | https://api.github.com/repos/pandas-dev/pandas/pulls/56254 | 2023-11-30T08:07:47Z | 2023-11-30T17:29:14Z | 2023-11-30T17:29:14Z | 2023-11-30T17:29:21Z |
FIX: Solving Int64 precision loss when read_csv(StringIO(data), dtype… | diff --git a/=2.2.0 b/=2.2.0
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/=6.46.1 b/=6.46.1
new file mode 100644
index 0000000000000..dc9132a1fecbe
--- /dev/null
+++ b/=6.46.1
@@ -0,0 +1,16 @@
+Requirement already satisfied: cython in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (3.0.5)
+Requirement already satisfied: python-dateutil in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (2.8.2)
+Requirement already satisfied: pytz in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (2023.3.post1)
+Requirement already satisfied: pytest in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (7.4.3)
+Requirement already satisfied: pytest-xdist in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (3.5.0)
+Requirement already satisfied: hypothesis in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (6.91.0)
+Requirement already satisfied: versioneer[toml] in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (0.29)
+Requirement already satisfied: tomli in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from versioneer[toml]) (2.0.1)
+Requirement already satisfied: six>=1.5 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from python-dateutil) (1.16.0)
+Requirement already satisfied: iniconfig in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from pytest) (2.0.0)
+Requirement already satisfied: packaging in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from pytest) (23.2)
+Requirement already satisfied: pluggy<2.0,>=0.12 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from pytest) (1.3.0)
+Requirement already satisfied: exceptiongroup>=1.0.0rc8 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from pytest) (1.2.0)
+Requirement already satisfied: execnet>=1.1 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from pytest-xdist) (2.0.2)
+Requirement already satisfied: attrs>=19.2.0 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from hypothesis) (23.1.0)
+Requirement already satisfied: sortedcontainers<3.0.0,>=2.1.0 in /home/martins/virtualenvs/pandas-dev/lib/python3.10/site-packages (from hypothesis) (2.4.0)
diff --git a/=7.3.2 b/=7.3.2
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/doc/source/user_guide/style.nbconvert.ipynb b/doc/source/user_guide/style.nbconvert.ipynb
new file mode 100644
index 0000000000000..5901f10dff94b
--- /dev/null
+++ b/doc/source/user_guide/style.nbconvert.ipynb
@@ -0,0 +1,15773 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Table Visualization\n",
+ "\n",
+ "This section demonstrates visualization of tabular data using the [Styler][styler]\n",
+ "class. For information on visualization with charting please see [Chart Visualization][viz]. This document is written as a Jupyter Notebook, and can be viewed or downloaded [here][download].\n",
+ "\n",
+ "## Styler Object and Customising the Display\n",
+ "Styling and output display customisation should be performed **after** the data in a DataFrame has been processed. The Styler is **not** dynamically updated if further changes to the DataFrame are made. The `DataFrame.style` attribute is a property that returns a [Styler][styler] object. It has a `_repr_html_` method defined on it so it is rendered automatically in Jupyter Notebook.\n",
+ "\n",
+ "The Styler, which can be used for large data but is primarily designed for small data, currently has the ability to output to these formats:\n",
+ "\n",
+ " - HTML\n",
+ " - LaTeX\n",
+ " - String (and CSV by extension)\n",
+ " - Excel\n",
+ " - (JSON is not currently available)\n",
+ "\n",
+ "The first three of these have display customisation methods designed to format and customise the output. These include:\n",
+ "\n",
+ " - Formatting values, the index and columns headers, using [.format()][formatfunc] and [.format_index()][formatfuncindex],\n",
+ " - Renaming the index or column header labels, using [.relabel_index()][relabelfunc]\n",
+ " - Hiding certain columns, the index and/or column headers, or index names, using [.hide()][hidefunc]\n",
+ " - Concatenating similar DataFrames, using [.concat()][concatfunc]\n",
+ " \n",
+ "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
+ "[viz]: visualization.rst\n",
+ "[download]: https://nbviewer.org/github/pandas-dev/pandas/blob/main/doc/source/user_guide/style.ipynb\n",
+ "[format]: https://docs.python.org/3/library/string.html#format-specification-mini-language\n",
+ "[formatfunc]: ../reference/api/pandas.io.formats.style.Styler.format.rst\n",
+ "[formatfuncindex]: ../reference/api/pandas.io.formats.style.Styler.format_index.rst\n",
+ "[relabelfunc]: ../reference/api/pandas.io.formats.style.Styler.relabel_index.rst\n",
+ "[hidefunc]: ../reference/api/pandas.io.formats.style.Styler.hide.rst\n",
+ "[concatfunc]: ../reference/api/pandas.io.formats.style.Styler.concat.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:21.034532Z",
+ "iopub.status.busy": "2023-11-30T07:57:21.034335Z",
+ "iopub.status.idle": "2023-11-30T07:57:21.352832Z",
+ "shell.execute_reply": "2023-11-30T07:57:21.352268Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot\n",
+ "# We have this here to trigger matplotlib's font cache stuff.\n",
+ "# This cell is hidden from the output"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Formatting the Display\n",
+ "\n",
+ "### Formatting Values\n",
+ "\n",
+ "The [Styler][styler] distinguishes the *display* value from the *actual* value, in both data values and index or columns headers. To control the display value, the text is printed in each cell as a string, and we can use the [.format()][formatfunc] and [.format_index()][formatfuncindex] methods to manipulate this according to a [format spec string][format] or a callable that takes a single value and returns a string. It is possible to define this for the whole table, or index, or for individual columns, or MultiIndex levels. We can also overwrite index names.\n",
+ "\n",
+ "Additionally, the format function has a **precision** argument to specifically help format floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** and **hyperlinks** arguments to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' global options such as `styler.format.precision` option, controllable using `with pd.option_context('format.precision', 2):`\n",
+ "\n",
+ "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
+ "[format]: https://docs.python.org/3/library/string.html#format-specification-mini-language\n",
+ "[formatfunc]: ../reference/api/pandas.io.formats.style.Styler.format.rst\n",
+ "[formatfuncindex]: ../reference/api/pandas.io.formats.style.Styler.format_index.rst\n",
+ "[relabelfunc]: ../reference/api/pandas.io.formats.style.Styler.relabel_index.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:21.355225Z",
+ "iopub.status.busy": "2023-11-30T07:57:21.354979Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.076652Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.075425Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_9ec7b\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_9ec7b_level0_col0\" class=\"col_heading level0 col0\" >STRINGS</th>\n",
+ " <th id=\"T_9ec7b_level0_col1\" class=\"col_heading level0 col1\" >INTS</th>\n",
+ " <th id=\"T_9ec7b_level0_col2\" class=\"col_heading level0 col2\" >FLOATS</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_9ec7b_level0_row0\" class=\"row_heading level0 row0\" >row 1</th>\n",
+ " <td id=\"T_9ec7b_row0_col0\" class=\"data row0 col0\" >Adam</td>\n",
+ " <td id=\"T_9ec7b_row0_col1\" class=\"data row0 col1\" >1</td>\n",
+ " <td id=\"T_9ec7b_row0_col2\" class=\"data row0 col2\" >1,123</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9ec7b_level0_row1\" class=\"row_heading level0 row1\" >row 2</th>\n",
+ " <td id=\"T_9ec7b_row1_col0\" class=\"data row1 col0\" >Mike</td>\n",
+ " <td id=\"T_9ec7b_row1_col1\" class=\"data row1 col1\" >3</td>\n",
+ " <td id=\"T_9ec7b_row1_col2\" class=\"data row1 col2\" >1.000,230</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabcd0>"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import matplotlib as mpl\n",
+ "\n",
+ "df = pd.DataFrame({\n",
+ " \"strings\": [\"Adam\", \"Mike\"],\n",
+ " \"ints\": [1, 3],\n",
+ " \"floats\": [1.123, 1000.23]\n",
+ "})\n",
+ "df.style \\\n",
+ " .format(precision=3, thousands=\".\", decimal=\",\") \\\n",
+ " .format_index(str.upper, axis=1) \\\n",
+ " .relabel_index([\"row 1\", \"row 2\"], axis=0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Using Styler to manipulate the display is a useful feature because maintaining the indexing and data values for other purposes gives greater control. You do not have to overwrite your DataFrame to display it how you like. Here is a more comprehensive example of using the formatting functions whilst still relying on the underlying data for indexing and calculations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.104384Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.103883Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.114017Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.113424Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>Tokyo</th>\n",
+ " <th>Beijing</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>2021-01-01</th>\n",
+ " <td>2.033751</td>\n",
+ " <td>2.369463</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-02</th>\n",
+ " <td>3.611768</td>\n",
+ " <td>0.851853</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-03</th>\n",
+ " <td>1.079822</td>\n",
+ " <td>3.499926</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-04</th>\n",
+ " <td>3.431709</td>\n",
+ " <td>0.568034</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-05</th>\n",
+ " <td>3.872228</td>\n",
+ " <td>4.537014</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-06</th>\n",
+ " <td>3.644568</td>\n",
+ " <td>3.040852</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-07</th>\n",
+ " <td>4.929528</td>\n",
+ " <td>4.358659</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-08</th>\n",
+ " <td>4.410750</td>\n",
+ " <td>4.864746</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-09</th>\n",
+ " <td>0.662192</td>\n",
+ " <td>1.373293</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2021-01-10</th>\n",
+ " <td>0.596985</td>\n",
+ " <td>3.833749</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " Tokyo Beijing\n",
+ "2021-01-01 2.033751 2.369463\n",
+ "2021-01-02 3.611768 0.851853\n",
+ "2021-01-03 1.079822 3.499926\n",
+ "2021-01-04 3.431709 0.568034\n",
+ "2021-01-05 3.872228 4.537014\n",
+ "2021-01-06 3.644568 3.040852\n",
+ "2021-01-07 4.929528 4.358659\n",
+ "2021-01-08 4.410750 4.864746\n",
+ "2021-01-09 0.662192 1.373293\n",
+ "2021-01-10 0.596985 3.833749"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "weather_df = pd.DataFrame(np.random.rand(10,2)*5, \n",
+ " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
+ " columns=[\"Tokyo\", \"Beijing\"])\n",
+ "\n",
+ "def rain_condition(v): \n",
+ " if v < 1.75:\n",
+ " return \"Dry\"\n",
+ " elif v < 2.75:\n",
+ " return \"Rain\"\n",
+ " return \"Heavy Rain\"\n",
+ "\n",
+ "def make_pretty(styler):\n",
+ " styler.set_caption(\"Weather Conditions\")\n",
+ " styler.format(rain_condition)\n",
+ " styler.format_index(lambda v: v.strftime(\"%A\"))\n",
+ " styler.background_gradient(axis=None, vmin=1, vmax=5, cmap=\"YlGnBu\")\n",
+ " return styler\n",
+ "\n",
+ "weather_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.116119Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.115921Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.125129Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.124639Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_ab208_row0_col0 {\n",
+ " background-color: #2296c1;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row0_col1 {\n",
+ " background-color: #ffffd9;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_ab208_row1_col0 {\n",
+ " background-color: #216bae;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row1_col1 {\n",
+ " background-color: #22328f;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row2_col0 {\n",
+ " background-color: #1f82b9;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row2_col1 {\n",
+ " background-color: #3eb3c4;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row3_col0 {\n",
+ " background-color: #0c2060;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row3_col1 {\n",
+ " background-color: #24409a;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row4_col0 {\n",
+ " background-color: #253b97;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_ab208_row4_col1 {\n",
+ " background-color: #0f2367;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_ab208\">\n",
+ " <caption>Weather Conditions</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_ab208_level0_col0\" class=\"col_heading level0 col0\" >Tokyo</th>\n",
+ " <th id=\"T_ab208_level0_col1\" class=\"col_heading level0 col1\" >Beijing</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_ab208_level0_row0\" class=\"row_heading level0 row0\" >Monday</th>\n",
+ " <td id=\"T_ab208_row0_col0\" class=\"data row0 col0\" >Heavy Rain</td>\n",
+ " <td id=\"T_ab208_row0_col1\" class=\"data row0 col1\" >Dry</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab208_level0_row1\" class=\"row_heading level0 row1\" >Tuesday</th>\n",
+ " <td id=\"T_ab208_row1_col0\" class=\"data row1 col0\" >Heavy Rain</td>\n",
+ " <td id=\"T_ab208_row1_col1\" class=\"data row1 col1\" >Heavy Rain</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab208_level0_row2\" class=\"row_heading level0 row2\" >Wednesday</th>\n",
+ " <td id=\"T_ab208_row2_col0\" class=\"data row2 col0\" >Heavy Rain</td>\n",
+ " <td id=\"T_ab208_row2_col1\" class=\"data row2 col1\" >Heavy Rain</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab208_level0_row3\" class=\"row_heading level0 row3\" >Thursday</th>\n",
+ " <td id=\"T_ab208_row3_col0\" class=\"data row3 col0\" >Heavy Rain</td>\n",
+ " <td id=\"T_ab208_row3_col1\" class=\"data row3 col1\" >Heavy Rain</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab208_level0_row4\" class=\"row_heading level0 row4\" >Friday</th>\n",
+ " <td id=\"T_ab208_row4_col0\" class=\"data row4 col0\" >Heavy Rain</td>\n",
+ " <td id=\"T_ab208_row4_col1\" class=\"data row4 col1\" >Heavy Rain</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabf40>"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "weather_df.loc[\"2021-01-04\":\"2021-01-08\"].style.pipe(make_pretty)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Hiding Data\n",
+ "\n",
+ "The index and column headers can be completely hidden, as well subselecting rows or columns that one wishes to exclude. Both these options are performed using the same methods.\n",
+ "\n",
+ "The index can be hidden from rendering by calling [.hide()][hideidx] without any arguments, which might be useful if your index is integer based. Similarly column headers can be hidden by calling [.hide(axis=\"columns\")][hideidx] without any further arguments.\n",
+ "\n",
+ "Specific rows or columns can be hidden from rendering by calling the same [.hide()][hideidx] method and passing in a row/column label, a list-like or a slice of row/column labels to for the ``subset`` argument.\n",
+ "\n",
+ "Hiding does not change the integer arrangement of CSS classes, e.g. hiding the first two columns of a DataFrame means the column class indexing will still start at `col2`, since `col0` and `col1` are simply ignored.\n",
+ "\n",
+ "[hideidx]: ../reference/api/pandas.io.formats.style.Styler.hide.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.127108Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.126941Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.132648Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.132172Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_f38bb\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_f38bb_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_f38bb_level0_col3\" class=\"col_heading level0 col3\" >3</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_f38bb_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_f38bb_row1_col1\" class=\"data row1 col1\" >-0.704067</td>\n",
+ " <td id=\"T_f38bb_row1_col3\" class=\"data row1 col3\" >1.588432</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_f38bb_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_f38bb_row3_col1\" class=\"data row3 col1\" >-0.251142</td>\n",
+ " <td id=\"T_f38bb_row3_col3\" class=\"data row3 col3\" >0.249267</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dd1480>"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = pd.DataFrame(np.random.randn(5, 5))\n",
+ "df.style \\\n",
+ " .hide(subset=[0, 2, 4], axis=0) \\\n",
+ " .hide(subset=[0, 2, 4], axis=1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To invert the function to a **show** functionality it is best practice to compose a list of hidden items."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.134586Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.134385Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.140825Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.140290Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_7d95c\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_7d95c_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_7d95c_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " <th id=\"T_7d95c_level0_col4\" class=\"col_heading level0 col4\" >4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_7d95c_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_7d95c_row0_col0\" class=\"data row0 col0\" >0.417573</td>\n",
+ " <td id=\"T_7d95c_row0_col2\" class=\"data row0 col2\" >0.771418</td>\n",
+ " <td id=\"T_7d95c_row0_col4\" class=\"data row0 col4\" >-2.290184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_7d95c_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_7d95c_row2_col0\" class=\"data row2 col0\" >-0.246044</td>\n",
+ " <td id=\"T_7d95c_row2_col2\" class=\"data row2 col2\" >-1.383496</td>\n",
+ " <td id=\"T_7d95c_row2_col4\" class=\"data row2 col4\" >-0.025000</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_7d95c_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_7d95c_row4_col0\" class=\"data row4 col0\" >-2.019049</td>\n",
+ " <td id=\"T_7d95c_row4_col2\" class=\"data row4 col2\" >0.078452</td>\n",
+ " <td id=\"T_7d95c_row4_col4\" class=\"data row4 col4\" >0.421583</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0ad9b550>"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "show = [0, 2, 4]\n",
+ "df.style \\\n",
+ " .hide([row for row in df.index if row not in show], axis=0) \\\n",
+ " .hide([col for col in df.columns if col not in show], axis=1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Concatenating DataFrame Outputs\n",
+ "\n",
+ "Two or more Stylers can be concatenated together provided they share the same columns. This is very useful for showing summary statistics for a DataFrame, and is often used in combination with DataFrame.agg.\n",
+ "\n",
+ "Since the objects concatenated are Stylers they can independently be styled as will be shown below and their concatenation preserves those styles."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.143120Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.142910Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.152595Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.152184Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_92117\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_92117_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_92117_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_92117_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " <th id=\"T_92117_level0_col3\" class=\"col_heading level0 col3\" >3</th>\n",
+ " <th id=\"T_92117_level0_col4\" class=\"col_heading level0 col4\" >4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_92117_row0_col0\" class=\"data row0 col0\" >0.4</td>\n",
+ " <td id=\"T_92117_row0_col1\" class=\"data row0 col1\" >-0.2</td>\n",
+ " <td id=\"T_92117_row0_col2\" class=\"data row0 col2\" >0.8</td>\n",
+ " <td id=\"T_92117_row0_col3\" class=\"data row0 col3\" >-1.8</td>\n",
+ " <td id=\"T_92117_row0_col4\" class=\"data row0 col4\" >-2.3</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_92117_row1_col0\" class=\"data row1 col0\" >-0.1</td>\n",
+ " <td id=\"T_92117_row1_col1\" class=\"data row1 col1\" >-0.7</td>\n",
+ " <td id=\"T_92117_row1_col2\" class=\"data row1 col2\" >-1.3</td>\n",
+ " <td id=\"T_92117_row1_col3\" class=\"data row1 col3\" >1.6</td>\n",
+ " <td id=\"T_92117_row1_col4\" class=\"data row1 col4\" >-2.1</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_92117_row2_col0\" class=\"data row2 col0\" >-0.2</td>\n",
+ " <td id=\"T_92117_row2_col1\" class=\"data row2 col1\" >1.8</td>\n",
+ " <td id=\"T_92117_row2_col2\" class=\"data row2 col2\" >-1.4</td>\n",
+ " <td id=\"T_92117_row2_col3\" class=\"data row2 col3\" >0.4</td>\n",
+ " <td id=\"T_92117_row2_col4\" class=\"data row2 col4\" >-0.0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_92117_row3_col0\" class=\"data row3 col0\" >-0.1</td>\n",
+ " <td id=\"T_92117_row3_col1\" class=\"data row3 col1\" >-0.3</td>\n",
+ " <td id=\"T_92117_row3_col2\" class=\"data row3 col2\" >0.7</td>\n",
+ " <td id=\"T_92117_row3_col3\" class=\"data row3 col3\" >0.2</td>\n",
+ " <td id=\"T_92117_row3_col4\" class=\"data row3 col4\" >0.4</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_92117_row4_col0\" class=\"data row4 col0\" >-2.0</td>\n",
+ " <td id=\"T_92117_row4_col1\" class=\"data row4 col1\" >0.6</td>\n",
+ " <td id=\"T_92117_row4_col2\" class=\"data row4 col2\" >0.1</td>\n",
+ " <td id=\"T_92117_row4_col3\" class=\"data row4 col3\" >-1.8</td>\n",
+ " <td id=\"T_92117_row4_col4\" class=\"data row4 col4\" >0.4</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_foot0_row0\" class=\"foot0_row_heading level0 foot0_row0\" >Sum</th>\n",
+ " <td id=\"T_92117_foot0_row0_col0\" class=\"foot0_data foot0_row0 col0\" >-2.066</td>\n",
+ " <td id=\"T_92117_foot0_row0_col1\" class=\"foot0_data foot0_row0 col1\" >1.263</td>\n",
+ " <td id=\"T_92117_foot0_row0_col2\" class=\"foot0_data foot0_row0 col2\" >-1.208</td>\n",
+ " <td id=\"T_92117_foot0_row0_col3\" class=\"foot0_data foot0_row0 col3\" >-1.357</td>\n",
+ " <td id=\"T_92117_foot0_row0_col4\" class=\"foot0_data foot0_row0 col4\" >-3.641</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_92117_level0_foot0_row1\" class=\"foot0_row_heading level0 foot0_row1\" >Average</th>\n",
+ " <td id=\"T_92117_foot0_row1_col0\" class=\"foot0_data foot0_row1 col0\" >-0.413</td>\n",
+ " <td id=\"T_92117_foot0_row1_col1\" class=\"foot0_data foot0_row1 col1\" >0.253</td>\n",
+ " <td id=\"T_92117_foot0_row1_col2\" class=\"foot0_data foot0_row1 col2\" >-0.242</td>\n",
+ " <td id=\"T_92117_foot0_row1_col3\" class=\"foot0_data foot0_row1 col3\" >-0.271</td>\n",
+ " <td id=\"T_92117_foot0_row1_col4\" class=\"foot0_data foot0_row1 col4\" >-0.728</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabfa0>"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "summary_styler = df.agg([\"sum\", \"mean\"]).style \\\n",
+ " .format(precision=3) \\\n",
+ " .relabel_index([\"Sum\", \"Average\"])\n",
+ "df.style.format(precision=1).concat(summary_styler)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Styler Object and HTML \n",
+ "\n",
+ "The [Styler][styler] was originally constructed to support the wide array of HTML formatting options. Its HTML output creates an HTML `<table>` and leverages CSS styling language to manipulate many parameters including colors, fonts, borders, background, etc. See [here][w3schools] for more information on styling HTML tables. This allows a lot of flexibility out of the box, and even enables web developers to integrate DataFrames into their exiting user interface designs.\n",
+ "\n",
+ "Below we demonstrate the default output, which looks very similar to the standard DataFrame HTML representation. But the HTML here has already attached some CSS classes to each cell, even if we haven't yet created any styles. We can view these by calling the [.to_html()][tohtml] method, which returns the raw HTML as string, which is useful for further processing or adding to a file - read on in [More about CSS and HTML](#More-About-CSS-and-HTML). This section will also provide a walkthrough for how to convert this default output to represent a DataFrame output that is more communicative. For example how we can build `s`:\n",
+ "\n",
+ "[tohtml]: ../reference/api/pandas.io.formats.style.Styler.to_html.rst\n",
+ "\n",
+ "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
+ "[w3schools]: https://www.w3schools.com/html/html_tables.asp"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.155351Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.154933Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.162578Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.162141Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_d9f1f\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_d9f1f_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_d9f1f_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " <th id=\"T_d9f1f_level0_col4\" class=\"col_heading level0 col4\" colspan=\"2\">Random</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_d9f1f_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_d9f1f_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_d9f1f_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_d9f1f_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " <th id=\"T_d9f1f_level1_col4\" class=\"col_heading level1 col4\" >Tumour</th>\n",
+ " <th id=\"T_d9f1f_level1_col5\" class=\"col_heading level1 col5\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " <th class=\"blank col4\" > </th>\n",
+ " <th class=\"blank col5\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_d9f1f_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_d9f1f_row0_col0\" class=\"data row0 col0\" >38.000000</td>\n",
+ " <td id=\"T_d9f1f_row0_col1\" class=\"data row0 col1\" >2.000000</td>\n",
+ " <td id=\"T_d9f1f_row0_col2\" class=\"data row0 col2\" >18.000000</td>\n",
+ " <td id=\"T_d9f1f_row0_col3\" class=\"data row0 col3\" >22.000000</td>\n",
+ " <td id=\"T_d9f1f_row0_col4\" class=\"data row0 col4\" >21</td>\n",
+ " <td id=\"T_d9f1f_row0_col5\" class=\"data row0 col5\" >nan</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_d9f1f_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_d9f1f_row1_col0\" class=\"data row1 col0\" >19.000000</td>\n",
+ " <td id=\"T_d9f1f_row1_col1\" class=\"data row1 col1\" >439.000000</td>\n",
+ " <td id=\"T_d9f1f_row1_col2\" class=\"data row1 col2\" >6.000000</td>\n",
+ " <td id=\"T_d9f1f_row1_col3\" class=\"data row1 col3\" >452.000000</td>\n",
+ " <td id=\"T_d9f1f_row1_col4\" class=\"data row1 col4\" >226</td>\n",
+ " <td id=\"T_d9f1f_row1_col5\" class=\"data row1 col5\" >232.000000</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0ad9ae30>"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]], \n",
+ " index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'), \n",
+ " columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))\n",
+ "df.style"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.164723Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.164521Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.173906Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.173373Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "# Hidden cell to just create the below example: code is covered throughout the guide.\n",
+ "s = df.style\\\n",
+ " .hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis='columns')\\\n",
+ " .format('{:.0f}')\\\n",
+ " .set_table_styles([{\n",
+ " 'selector': '',\n",
+ " 'props': 'border-collapse: separate;'\n",
+ " },{\n",
+ " 'selector': 'caption',\n",
+ " 'props': 'caption-side: bottom; font-size:1.3em;'\n",
+ " },{\n",
+ " 'selector': '.index_name',\n",
+ " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
+ " },{\n",
+ " 'selector': 'th:not(.index_name)',\n",
+ " 'props': 'background-color: #000066; color: white;'\n",
+ " },{\n",
+ " 'selector': 'th.col_heading',\n",
+ " 'props': 'text-align: center;'\n",
+ " },{\n",
+ " 'selector': 'th.col_heading.level0',\n",
+ " 'props': 'font-size: 1.5em;'\n",
+ " },{\n",
+ " 'selector': 'th.col2',\n",
+ " 'props': 'border-left: 1px solid white;'\n",
+ " },{\n",
+ " 'selector': '.col2',\n",
+ " 'props': 'border-left: 1px solid #000066;'\n",
+ " },{\n",
+ " 'selector': 'td',\n",
+ " 'props': 'text-align: center; font-weight:bold;'\n",
+ " },{\n",
+ " 'selector': '.true',\n",
+ " 'props': 'background-color: #e6ffe6;'\n",
+ " },{\n",
+ " 'selector': '.false',\n",
+ " 'props': 'background-color: #ffe6e6;'\n",
+ " },{\n",
+ " 'selector': '.border-red',\n",
+ " 'props': 'border: 2px dashed red;'\n",
+ " },{\n",
+ " 'selector': '.border-green',\n",
+ " 'props': 'border: 2px dashed green;'\n",
+ " },{\n",
+ " 'selector': 'td:hover',\n",
+ " 'props': 'background-color: #ffffb3;'\n",
+ " }])\\\n",
+ " .set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],\n",
+ " ['false', 'true', 'false', 'true', '', '']], \n",
+ " index=df.index, columns=df.columns))\\\n",
+ " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
+ " .set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', \"This model's total number of false negatives is too high\", '', ''],\n",
+ " ['', '', '', '', '', '']], \n",
+ " index=df.index, columns=df.columns),\n",
+ " css_class='pd-tt', props=\n",
+ " 'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
+ " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
+ " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.176107Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.175875Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.182525Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.181979Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_86c30 {\n",
+ " border-collapse: separate;\n",
+ "}\n",
+ "#T_86c30 caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.3em;\n",
+ "}\n",
+ "#T_86c30 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_86c30 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_86c30 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_86c30 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_86c30 th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_86c30 .col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_86c30 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_86c30 .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_86c30 .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_86c30 .border-red {\n",
+ " border: 2px dashed red;\n",
+ "}\n",
+ "#T_86c30 .border-green {\n",
+ " border: 2px dashed green;\n",
+ "}\n",
+ "#T_86c30 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_86c30 .pd-tt {\n",
+ " visibility: hidden;\n",
+ " position: absolute;\n",
+ " z-index: 1;\n",
+ " border: 1px solid #000066;\n",
+ " background-color: white;\n",
+ " color: #000066;\n",
+ " font-size: 0.8em;\n",
+ " transform: translate(0px, -24px);\n",
+ " padding: 0.6em;\n",
+ " border-radius: 0.5em;\n",
+ "}\n",
+ "#T_86c30 #T_86c30_row0_col0:hover .pd-tt {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_86c30 #T_86c30_row0_col0 .pd-tt::after {\n",
+ " content: \"This model has a very strong true positive rate\";\n",
+ "}\n",
+ "#T_86c30 #T_86c30_row0_col3:hover .pd-tt {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_86c30 #T_86c30_row0_col3 .pd-tt::after {\n",
+ " content: \"This model's total number of false negatives is too high\";\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_86c30\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_86c30_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_86c30_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_86c30_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_86c30_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_86c30_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_86c30_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_86c30_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_86c30_row0_col0\" class=\"data row0 col0 true border-green\" >38<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row0_col1\" class=\"data row0 col1 false\" >2<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row0_col2\" class=\"data row0 col2 true\" >18<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row0_col3\" class=\"data row0 col3 false border-red\" >22<span class=\"pd-tt\"></span></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_86c30_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_86c30_row1_col0\" class=\"data row1 col0 false\" >19<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row1_col1\" class=\"data row1 col1 true\" >439<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row1_col2\" class=\"data row1 col2 false\" >6<span class=\"pd-tt\"></span></td>\n",
+ " <td id=\"T_86c30_row1_col3\" class=\"data row1 col3 true\" >452<span class=\"pd-tt\"></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adad030>"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The first step we have taken is to create the Styler object from the DataFrame and then select the range of interest by hiding unwanted columns with [.hide()][hideidx].\n",
+ "\n",
+ "[hideidx]: ../reference/api/pandas.io.formats.style.Styler.hide.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.184765Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.184490Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.192542Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.192102Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_1525a\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_1525a_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_1525a_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_1525a_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_1525a_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_1525a_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_1525a_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_1525a_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_1525a_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_1525a_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_1525a_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_1525a_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_1525a_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_1525a_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_1525a_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_1525a_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_1525a_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis=\"columns\")\n",
+ "s"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.194488Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.194316Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.199593Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.199177Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_after_hide\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_hide_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_hide_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_hide_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_hide_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_hide_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_hide_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_hide_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_hide_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_hide_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_hide_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_hide_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_hide_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_hide_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting\n",
+ "s.set_uuid('after_hide')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Methods to Add Styles\n",
+ "\n",
+ "There are **3 primary methods of adding custom CSS styles** to [Styler][styler]:\n",
+ "\n",
+ "- Using [.set_table_styles()][table] to control broader areas of the table with specified internal CSS. Although table styles allow the flexibility to add CSS selectors and properties controlling all individual parts of the table, they are unwieldy for individual cell specifications. Also, note that table styles cannot be exported to Excel. \n",
+ "- Using [.set_td_classes()][td_class] to directly link either external CSS classes to your data cells or link the internal CSS classes created by [.set_table_styles()][table]. See [here](#Setting-Classes-and-Linking-to-External-CSS). These cannot be used on column header rows or indexes, and also won't export to Excel. \n",
+ "- Using the [.apply()][apply] and [.map()][map] functions to add direct internal CSS to specific data cells. See [here](#Styler-Functions). As of v1.4.0 there are also methods that work directly on column header rows or indexes; [.apply_index()][applyindex] and [.map_index()][mapindex]. Note that only these methods add styles that will export to Excel. These methods work in a similar way to [DataFrame.apply()][dfapply] and [DataFrame.map()][dfmap].\n",
+ "\n",
+ "[table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst\n",
+ "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
+ "[td_class]: ../reference/api/pandas.io.formats.style.Styler.set_td_classes.rst\n",
+ "[apply]: ../reference/api/pandas.io.formats.style.Styler.apply.rst\n",
+ "[map]: ../reference/api/pandas.io.formats.style.Styler.map.rst\n",
+ "[applyindex]: ../reference/api/pandas.io.formats.style.Styler.apply_index.rst\n",
+ "[mapindex]: ../reference/api/pandas.io.formats.style.Styler.map_index.rst\n",
+ "[dfapply]: ../reference/api/pandas.DataFrame.apply.rst\n",
+ "[dfmap]: ../reference/api/pandas.DataFrame.map.rst"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Table Styles\n",
+ "\n",
+ "Table styles are flexible enough to control all individual parts of the table, including column headers and indexes. \n",
+ "However, they can be unwieldy to type for individual data cells or for any kind of conditional formatting, so we recommend that table styles are used for broad styling, such as entire rows or columns at a time.\n",
+ "\n",
+ "Table styles are also used to control features which can apply to the whole table at once such as creating a generic hover functionality. The `:hover` pseudo-selector, as well as other pseudo-selectors, can only be used this way.\n",
+ "\n",
+ "To replicate the normal format of CSS selectors and properties (attribute value pairs), e.g. \n",
+ "\n",
+ "```\n",
+ "tr:hover {\n",
+ " background-color: #ffff99;\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "the necessary format to pass styles to [.set_table_styles()][table] is as a list of dicts, each with a CSS-selector tag and CSS-properties. Properties can either be a list of 2-tuples, or a regular CSS-string, for example:\n",
+ "\n",
+ "[table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.201681Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.201518Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.207580Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.207028Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_hide td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_hide .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_hide th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_hide\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_hide_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_hide_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_hide_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_hide_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_hide_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_hide_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_hide_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_hide_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_hide_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_hide_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_hide_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_hide_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_hide_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_hide_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "cell_hover = { # for row hover use <tr> instead of <td>\n",
+ " 'selector': 'td:hover',\n",
+ " 'props': [('background-color', '#ffffb3')]\n",
+ "}\n",
+ "index_names = {\n",
+ " 'selector': '.index_name',\n",
+ " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
+ "}\n",
+ "headers = {\n",
+ " 'selector': 'th:not(.index_name)',\n",
+ " 'props': 'background-color: #000066; color: white;'\n",
+ "}\n",
+ "s.set_table_styles([cell_hover, index_names, headers])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.209729Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.209543Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.214710Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.214307Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tab_styles1 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tab_styles1 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tab_styles1 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tab_styles1\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tab_styles1_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tab_styles1_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles1_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tab_styles1_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles1_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tab_styles1_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting\n",
+ "s.set_uuid('after_tab_styles1')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we just add a couple more styling artifacts targeting specific parts of the table. Be careful here, since we are *chaining methods* we need to explicitly instruct the method **not to** ``overwrite`` the existing styles."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.217046Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.216696Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.222474Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.221910Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tab_styles1 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tab_styles1 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tab_styles1 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_tab_styles1 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_tab_styles1 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_tab_styles1 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tab_styles1\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tab_styles1_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tab_styles1_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles1_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles1_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tab_styles1_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_tab_styles1_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles1_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tab_styles1_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_tab_styles1_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s.set_table_styles([\n",
+ " {'selector': 'th.col_heading', 'props': 'text-align: center;'},\n",
+ " {'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},\n",
+ " {'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},\n",
+ "], overwrite=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.224676Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.224440Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.229731Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.229298Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tab_styles2 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tab_styles2 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tab_styles2 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_tab_styles2 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_tab_styles2 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_tab_styles2 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tab_styles2\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tab_styles2_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tab_styles2_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles2_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tab_styles2_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles2_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tab_styles2_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting\n",
+ "s.set_uuid('after_tab_styles2')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a convenience method (*since version 1.2.0*) we can also pass a **dict** to [.set_table_styles()][table] which contains row or column keys. Behind the scenes Styler just indexes the keys and adds relevant `.col<m>` or `.row<n>` classes as necessary to the given CSS selectors.\n",
+ "\n",
+ "[table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.231761Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.231595Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.238399Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.237817Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tab_styles2 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tab_styles2 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tab_styles2 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_tab_styles2 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_tab_styles2 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_tab_styles2 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_tab_styles2 th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_tab_styles2 td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tab_styles2\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tab_styles2_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tab_styles2_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tab_styles2_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles2_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tab_styles2_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_after_tab_styles2_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tab_styles2_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tab_styles2_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_after_tab_styles2_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s.set_table_styles({\n",
+ " ('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},\n",
+ " {'selector': 'td', 'props': 'border-left: 1px solid #000066'}]\n",
+ "}, overwrite=False, axis=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.240686Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.240443Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.245785Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.245354Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_xyz01 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_xyz01 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_xyz01 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_xyz01 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_xyz01 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_xyz01 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_xyz01 th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_xyz01 td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_xyz01\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_xyz01_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_xyz01_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_xyz01_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_xyz01_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_xyz01_row0_col0\" class=\"data row0 col0\" >38</td>\n",
+ " <td id=\"T_xyz01_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " <td id=\"T_xyz01_row0_col2\" class=\"data row0 col2\" >18</td>\n",
+ " <td id=\"T_xyz01_row0_col3\" class=\"data row0 col3\" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_xyz01_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_xyz01_row1_col0\" class=\"data row1 col0\" >19</td>\n",
+ " <td id=\"T_xyz01_row1_col1\" class=\"data row1 col1\" >439</td>\n",
+ " <td id=\"T_xyz01_row1_col2\" class=\"data row1 col2\" >6</td>\n",
+ " <td id=\"T_xyz01_row1_col3\" class=\"data row1 col3\" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting \n",
+ "s.set_uuid('xyz01')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setting Classes and Linking to External CSS\n",
+ "\n",
+ "If you have designed a website then it is likely you will already have an external CSS file that controls the styling of table and cell objects within it. You may want to use these native files rather than duplicate all the CSS in python (and duplicate any maintenance work).\n",
+ "\n",
+ "### Table Attributes\n",
+ "\n",
+ "It is very easy to add a `class` to the main `<table>` using [.set_table_attributes()][tableatt]. This method can also attach inline styles - read more in [CSS Hierarchies](#CSS-Hierarchies).\n",
+ "\n",
+ "[tableatt]: ../reference/api/pandas.io.formats.style.Styler.set_table_attributes.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.247809Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.247642Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.252630Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.252098Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<table id=\"T_xyz01\" class=\"my-table-cls\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n"
+ ]
+ }
+ ],
+ "source": [
+ "out = s.set_table_attributes('class=\"my-table-cls\"').to_html()\n",
+ "print(out[out.find('<table'):][:109])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Data Cell CSS Classes\n",
+ "\n",
+ "*New in version 1.2.0*\n",
+ "\n",
+ "The [.set_td_classes()][tdclass] method accepts a DataFrame with matching indices and columns to the underlying [Styler][styler]'s DataFrame. That DataFrame will contain strings as css-classes to add to individual data cells: the `<td>` elements of the `<table>`. Rather than use external CSS we will create our classes internally and add them to table style. We will save adding the borders until the [section on tooltips](#Tooltips-and-Captions).\n",
+ "\n",
+ "[tdclass]: ../reference/api/pandas.io.formats.style.Styler.set_td_classes.rst\n",
+ "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.254941Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.254692Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.262469Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.261937Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_xyz01 td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_xyz01 .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_xyz01 th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_xyz01 th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_xyz01 th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_xyz01 td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_xyz01 th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_xyz01 td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_xyz01 .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_xyz01 .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_xyz01\" class=\"my-table-cls\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_xyz01_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_xyz01_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_xyz01_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_xyz01_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_xyz01_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_xyz01_row0_col0\" class=\"data row0 col0 true \" >38</td>\n",
+ " <td id=\"T_xyz01_row0_col1\" class=\"data row0 col1 false \" >2</td>\n",
+ " <td id=\"T_xyz01_row0_col2\" class=\"data row0 col2 true \" >18</td>\n",
+ " <td id=\"T_xyz01_row0_col3\" class=\"data row0 col3 false \" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_xyz01_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_xyz01_row1_col0\" class=\"data row1 col0 false \" >19</td>\n",
+ " <td id=\"T_xyz01_row1_col1\" class=\"data row1 col1 true \" >439</td>\n",
+ " <td id=\"T_xyz01_row1_col2\" class=\"data row1 col2 false \" >6</td>\n",
+ " <td id=\"T_xyz01_row1_col3\" class=\"data row1 col3 true \" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s.set_table_styles([ # create internal CSS classes\n",
+ " {'selector': '.true', 'props': 'background-color: #e6ffe6;'},\n",
+ " {'selector': '.false', 'props': 'background-color: #ffe6e6;'},\n",
+ "], overwrite=False)\n",
+ "cell_color = pd.DataFrame([['true ', 'false ', 'true ', 'false '], \n",
+ " ['false ', 'true ', 'false ', 'true ']], \n",
+ " index=df.index, \n",
+ " columns=df.columns[:4])\n",
+ "s.set_td_classes(cell_color)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.264794Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.264462Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.269827Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.269307Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_classes td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_classes .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_classes th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_classes th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_classes th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_classes td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_classes th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_classes td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_classes .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_classes .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_classes\" class=\"my-table-cls\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_classes_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_classes_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_classes_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_classes_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_classes_row0_col0\" class=\"data row0 col0 true \" >38</td>\n",
+ " <td id=\"T_after_classes_row0_col1\" class=\"data row0 col1 false \" >2</td>\n",
+ " <td id=\"T_after_classes_row0_col2\" class=\"data row0 col2 true \" >18</td>\n",
+ " <td id=\"T_after_classes_row0_col3\" class=\"data row0 col3 false \" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_classes_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_classes_row1_col0\" class=\"data row1 col0 false \" >19</td>\n",
+ " <td id=\"T_after_classes_row1_col1\" class=\"data row1 col1 true \" >439</td>\n",
+ " <td id=\"T_after_classes_row1_col2\" class=\"data row1 col2 false \" >6</td>\n",
+ " <td id=\"T_after_classes_row1_col3\" class=\"data row1 col3 true \" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting \n",
+ "s.set_uuid('after_classes')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Styler Functions\n",
+ "\n",
+ "### Acting on Data\n",
+ "\n",
+ "We use the following methods to pass your style functions. Both of those methods take a function (and some other keyword arguments) and apply it to the DataFrame in a certain way, rendering CSS styles.\n",
+ "\n",
+ "- [.map()][map] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.\n",
+ "- [.apply()][apply] (column-/row-/table-wise): accepts a function that takes a Series or DataFrame and returns a Series, DataFrame, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each column or row of your DataFrame one-at-a-time or the entire table at once, depending on the `axis` keyword argument. For columnwise use `axis=0`, rowwise use `axis=1`, and for the entire table at once use `axis=None`.\n",
+ "\n",
+ "This method is powerful for applying multiple, complex logic to data cells. We create a new DataFrame to demonstrate this.\n",
+ "\n",
+ "[apply]: ../reference/api/pandas.io.formats.style.Styler.apply.rst\n",
+ "[map]: ../reference/api/pandas.io.formats.style.Styler.map.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.272443Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.272064Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.278001Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.277523Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_a7436\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_a7436_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_a7436_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_a7436_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_a7436_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_a7436_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_a7436_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_a7436_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_a7436_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_a7436_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_a7436_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_a7436_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_a7436_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_a7436_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_a7436_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_a7436_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_a7436_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_a7436_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_a7436_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_a7436_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_a7436_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_a7436_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_a7436_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_a7436_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_a7436_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_a7436_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_a7436_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_a7436_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_a7436_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_a7436_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_a7436_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_a7436_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_a7436_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_a7436_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_a7436_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_a7436_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_a7436_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_a7436_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_a7436_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_a7436_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_a7436_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_a7436_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_a7436_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_a7436_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_a7436_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_a7436_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c40791870>"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "np.random.seed(0)\n",
+ "df2 = pd.DataFrame(np.random.randn(10,4), columns=['A','B','C','D'])\n",
+ "df2.style"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For example we can build a function that colors text if it is negative, and chain this with a function that partially fades cells of negligible value. Since this looks at each element in turn we use ``map``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.280051Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.279877Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.287593Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.286559Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_60891_row1_col1, #T_60891_row4_col3, #T_60891_row5_col0, #T_60891_row5_col3, #T_60891_row6_col1, #T_60891_row8_col0, #T_60891_row8_col1, #T_60891_row8_col2, #T_60891_row9_col2, #T_60891_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_60891_row1_col3, #T_60891_row2_col0, #T_60891_row4_col1, #T_60891_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_60891_row2_col2, #T_60891_row3_col1, #T_60891_row6_col2, #T_60891_row7_col2, #T_60891_row8_col3 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_60891\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_60891_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_60891_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_60891_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_60891_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_60891_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_60891_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_60891_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_60891_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_60891_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_60891_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_60891_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_60891_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_60891_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_60891_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_60891_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_60891_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_60891_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_60891_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_60891_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_60891_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_60891_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_60891_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_60891_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_60891_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_60891_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_60891_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_60891_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_60891_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_60891_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_60891_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_60891_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_60891_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_60891_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_60891_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_60891_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_60891_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_60891_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_60891_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_60891_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_60891_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_60891_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_60891_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_60891_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_60891_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_60891_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "def style_negative(v, props=''):\n",
+ " return props if v < 0 else None\n",
+ "s2 = df2.style.map(style_negative, props='color:red;')\\\n",
+ " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\n",
+ "s2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.289840Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.289647Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.295982Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.295545Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_applymap_row1_col1, #T_after_applymap_row4_col3, #T_after_applymap_row5_col0, #T_after_applymap_row5_col3, #T_after_applymap_row6_col1, #T_after_applymap_row8_col0, #T_after_applymap_row8_col1, #T_after_applymap_row8_col2, #T_after_applymap_row9_col2, #T_after_applymap_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_applymap_row1_col3, #T_after_applymap_row2_col0, #T_after_applymap_row4_col1, #T_after_applymap_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_applymap_row2_col2, #T_after_applymap_row3_col1, #T_after_applymap_row6_col2, #T_after_applymap_row7_col2, #T_after_applymap_row8_col3 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_applymap\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_applymap_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_applymap_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_applymap_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_applymap_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_applymap_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_applymap_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_applymap_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_applymap_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_applymap_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_applymap_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_applymap_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_applymap_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_applymap_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_applymap_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_applymap_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_applymap_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_applymap_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_applymap_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_applymap_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_applymap_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_applymap_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_applymap_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_applymap_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_applymap_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_applymap_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_applymap_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_applymap_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_applymap_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_applymap_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_applymap_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_applymap_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_applymap_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_applymap_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_applymap_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_applymap_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_applymap_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_applymap_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_applymap_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_applymap_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_applymap_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_applymap_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_applymap_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_applymap_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_applymap_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overwriting previous formatting\n",
+ "s2.set_uuid('after_applymap')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can also build a function that highlights the maximum value across rows, cols, and the DataFrame all at once. In this case we use ``apply``. Below we highlight the maximum in a column."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.297867Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.297701Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.305958Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.305438Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_applymap_row0_col2, #T_after_applymap_row0_col3, #T_after_applymap_row6_col0, #T_after_applymap_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_after_applymap_row1_col1, #T_after_applymap_row4_col3, #T_after_applymap_row5_col0, #T_after_applymap_row5_col3, #T_after_applymap_row6_col1, #T_after_applymap_row8_col0, #T_after_applymap_row8_col1, #T_after_applymap_row8_col2, #T_after_applymap_row9_col2, #T_after_applymap_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_applymap_row1_col3, #T_after_applymap_row2_col0, #T_after_applymap_row4_col1, #T_after_applymap_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_applymap_row2_col2, #T_after_applymap_row3_col1, #T_after_applymap_row6_col2, #T_after_applymap_row7_col2, #T_after_applymap_row8_col3 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_applymap\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_applymap_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_applymap_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_applymap_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_applymap_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_applymap_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_applymap_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_applymap_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_applymap_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_applymap_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_applymap_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_applymap_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_applymap_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_applymap_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_applymap_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_applymap_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_applymap_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_applymap_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_applymap_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_applymap_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_applymap_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_applymap_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_applymap_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_applymap_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_applymap_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_applymap_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_applymap_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_applymap_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_applymap_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_applymap_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_applymap_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_applymap_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_applymap_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_applymap_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_applymap_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_applymap_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_applymap_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_applymap_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_applymap_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_applymap_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_applymap_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_applymap_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_applymap_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_applymap_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_applymap_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_applymap_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "def highlight_max(s, props=''):\n",
+ " return np.where(s == np.nanmax(s.values), props, '')\n",
+ "s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.308159Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.307924Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.315250Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.314717Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_apply_row0_col2, #T_after_apply_row0_col3, #T_after_apply_row6_col0, #T_after_apply_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_after_apply_row1_col1, #T_after_apply_row4_col3, #T_after_apply_row5_col0, #T_after_apply_row5_col3, #T_after_apply_row6_col1, #T_after_apply_row8_col0, #T_after_apply_row8_col1, #T_after_apply_row8_col2, #T_after_apply_row9_col2, #T_after_apply_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_apply_row1_col3, #T_after_apply_row2_col0, #T_after_apply_row4_col1, #T_after_apply_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_row2_col2, #T_after_apply_row3_col1, #T_after_apply_row6_col2, #T_after_apply_row7_col2, #T_after_apply_row8_col3 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_apply\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_apply_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_apply_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_apply_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_apply_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_apply_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_apply_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_apply_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_apply_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_apply_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_apply_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_apply_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_apply_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_apply_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_apply_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_apply_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_apply_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_apply_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_apply_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_apply_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_apply_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_apply_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_apply_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_apply_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_apply_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_apply_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_apply_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_apply_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_apply_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_apply_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_apply_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_apply_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_apply_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_apply_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_apply_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_apply_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_apply_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_apply_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_apply_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_apply_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_apply_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_apply_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_apply_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_apply_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_apply_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overwriting previous formatting\n",
+ "s2.set_uuid('after_apply')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can use the same function across the different axes, highlighting here the DataFrame maximum in purple, and row maximums in pink."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.317325Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.317158Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.327235Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.326692Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_apply_row0_col2, #T_after_apply_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_after_apply_row0_col3 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_row1_col0, #T_after_apply_row2_col3, #T_after_apply_row3_col0, #T_after_apply_row4_col0, #T_after_apply_row5_col2, #T_after_apply_row7_col0, #T_after_apply_row9_col0 {\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_row1_col1, #T_after_apply_row4_col3, #T_after_apply_row5_col0, #T_after_apply_row5_col3, #T_after_apply_row6_col1, #T_after_apply_row8_col0, #T_after_apply_row8_col1, #T_after_apply_row8_col2, #T_after_apply_row9_col2, #T_after_apply_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_apply_row1_col3, #T_after_apply_row2_col0, #T_after_apply_row4_col1, #T_after_apply_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_row2_col2, #T_after_apply_row3_col1, #T_after_apply_row6_col2, #T_after_apply_row7_col2 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_row6_col0 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "#T_after_apply_row8_col3 {\n",
+ " opacity: 20%;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_apply\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_apply_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_apply_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_apply_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_apply_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_apply_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_apply_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_apply_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_apply_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_apply_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_apply_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_apply_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_apply_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_apply_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_apply_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_apply_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_apply_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_apply_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_apply_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_apply_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_apply_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_apply_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_apply_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_apply_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_apply_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_apply_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_apply_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_apply_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_apply_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_apply_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_apply_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_apply_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_apply_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_apply_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_apply_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_apply_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_apply_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_apply_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_apply_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_apply_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_apply_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_apply_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_apply_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_apply_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_apply_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.329192Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.329023Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.338928Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.338375Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_apply_again_row0_col2, #T_after_apply_again_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_after_apply_again_row0_col3 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col0, #T_after_apply_again_row2_col3, #T_after_apply_again_row3_col0, #T_after_apply_again_row4_col0, #T_after_apply_again_row5_col2, #T_after_apply_again_row7_col0, #T_after_apply_again_row9_col0 {\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col1, #T_after_apply_again_row4_col3, #T_after_apply_again_row5_col0, #T_after_apply_again_row5_col3, #T_after_apply_again_row6_col1, #T_after_apply_again_row8_col0, #T_after_apply_again_row8_col1, #T_after_apply_again_row8_col2, #T_after_apply_again_row9_col2, #T_after_apply_again_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col3, #T_after_apply_again_row2_col0, #T_after_apply_again_row4_col1, #T_after_apply_again_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_again_row2_col2, #T_after_apply_again_row3_col1, #T_after_apply_again_row6_col2, #T_after_apply_again_row7_col2 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_again_row6_col0 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "#T_after_apply_again_row8_col3 {\n",
+ " opacity: 20%;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_apply_again\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_apply_again_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_apply_again_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_apply_again_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_apply_again_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_apply_again_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_apply_again_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_apply_again_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_apply_again_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_apply_again_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_apply_again_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_apply_again_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_apply_again_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_apply_again_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_apply_again_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_apply_again_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_apply_again_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_apply_again_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_apply_again_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_apply_again_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_apply_again_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_apply_again_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_apply_again_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_apply_again_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_apply_again_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_apply_again_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_apply_again_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_apply_again_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_apply_again_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_apply_again_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_apply_again_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_apply_again_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_apply_again_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_apply_again_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_apply_again_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_apply_again_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_apply_again_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_apply_again_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_apply_again_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_apply_again_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_apply_again_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_apply_again_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_apply_again_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_apply_again_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_apply_again_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overwriting previous formatting\n",
+ "s2.set_uuid('after_apply_again')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "This last example shows how some styles have been overwritten by others. In general the most recent style applied is active, but you can read more in the [section on CSS hierarchies](#CSS-Hierarchies). You can also apply these styles to more granular parts of the DataFrame - read more in the section on [subset slicing](#Finer-Control-with-Slicing).\n",
+ "\n",
+ "It is possible to replicate some of this functionality using just classes but it can be more cumbersome. See [item 3) of Optimization](#Optimization)\n",
+ "\n",
+ "<div class=\"alert alert-info\">\n",
+ "\n",
+ "*Debugging Tip*: If you're having trouble writing your style function, try just passing it into ``DataFrame.apply``. Internally, ``Styler.apply`` uses ``DataFrame.apply`` so the result should be the same, and with ``DataFrame.apply`` you will be able to inspect the CSS string output of your intended function in each cell.\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Acting on the Index and Column Headers\n",
+ "\n",
+ "Similar application is achieved for headers by using:\n",
+ " \n",
+ "- [.map_index()][mapindex] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.\n",
+ "- [.apply_index()][applyindex] (level-wise): accepts a function that takes a Series and returns a Series, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each level of your Index one-at-a-time. To style the index use `axis=0` and to style the column headers use `axis=1`.\n",
+ "\n",
+ "You can select a `level` of a `MultiIndex` but currently no similar `subset` application is available for these methods.\n",
+ "\n",
+ "[applyindex]: ../reference/api/pandas.io.formats.style.Styler.apply_index.rst\n",
+ "[mapindex]: ../reference/api/pandas.io.formats.style.Styler.map_index.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.341247Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.341011Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.352890Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.352397Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_apply_again_row0_col2, #T_after_apply_again_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_after_apply_again_row0_col3 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col0, #T_after_apply_again_row2_col3, #T_after_apply_again_row3_col0, #T_after_apply_again_row4_col0, #T_after_apply_again_row5_col2, #T_after_apply_again_row7_col0, #T_after_apply_again_row9_col0 {\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col1, #T_after_apply_again_row4_col3, #T_after_apply_again_row5_col0, #T_after_apply_again_row5_col3, #T_after_apply_again_row6_col1, #T_after_apply_again_row8_col0, #T_after_apply_again_row8_col1, #T_after_apply_again_row8_col2, #T_after_apply_again_row9_col2, #T_after_apply_again_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_after_apply_again_row1_col3, #T_after_apply_again_row2_col0, #T_after_apply_again_row4_col1, #T_after_apply_again_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_again_row2_col2, #T_after_apply_again_row3_col1, #T_after_apply_again_row6_col2, #T_after_apply_again_row7_col2 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_after_apply_again_row6_col0 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "#T_after_apply_again_row8_col3 {\n",
+ " opacity: 20%;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_level0_row0, #T_after_apply_again_level0_row1, #T_after_apply_again_level0_row2, #T_after_apply_again_level0_row3, #T_after_apply_again_level0_row4 {\n",
+ " color: darkblue;\n",
+ "}\n",
+ "#T_after_apply_again_level0_row5, #T_after_apply_again_level0_row6, #T_after_apply_again_level0_row7, #T_after_apply_again_level0_row8, #T_after_apply_again_level0_row9 {\n",
+ " color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_level0_col0, #T_after_apply_again_level0_col1 {\n",
+ " color: pink;\n",
+ "}\n",
+ "#T_after_apply_again_level0_col2, #T_after_apply_again_level0_col3 {\n",
+ " color: darkblue;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_apply_again\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_after_apply_again_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_after_apply_again_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_after_apply_again_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_after_apply_again_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_after_apply_again_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_after_apply_again_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_after_apply_again_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_after_apply_again_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_after_apply_again_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_after_apply_again_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_after_apply_again_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_after_apply_again_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_after_apply_again_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_after_apply_again_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_after_apply_again_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_after_apply_again_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_after_apply_again_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_after_apply_again_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_after_apply_again_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_after_apply_again_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_after_apply_again_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_after_apply_again_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_after_apply_again_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_after_apply_again_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_after_apply_again_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_after_apply_again_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_after_apply_again_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_after_apply_again_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_after_apply_again_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_after_apply_again_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_after_apply_again_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_after_apply_again_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_after_apply_again_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_after_apply_again_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_after_apply_again_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_after_apply_again_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_after_apply_again_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_after_apply_again_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_after_apply_again_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_after_apply_again_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_apply_again_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_after_apply_again_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_after_apply_again_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_after_apply_again_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_after_apply_again_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adae350>"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s2.map_index(lambda v: \"color:pink;\" if v>4 else \"color:darkblue;\", axis=0)\n",
+ "s2.apply_index(lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Tooltips and Captions\n",
+ "\n",
+ "Table captions can be added with the [.set_caption()][caption] method. You can use table styles to control the CSS relevant to the caption.\n",
+ "\n",
+ "[caption]: ../reference/api/pandas.io.formats.style.Styler.set_caption.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.355209Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.354975Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.360536Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.360078Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_classes td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_classes .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_classes th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_classes th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_classes th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_classes td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_classes th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_classes td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_classes .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_classes .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_classes caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_classes\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_classes_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_classes_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_classes_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_classes_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_classes_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_classes_row0_col0\" class=\"data row0 col0 true \" >38</td>\n",
+ " <td id=\"T_after_classes_row0_col1\" class=\"data row0 col1 false \" >2</td>\n",
+ " <td id=\"T_after_classes_row0_col2\" class=\"data row0 col2 true \" >18</td>\n",
+ " <td id=\"T_after_classes_row0_col3\" class=\"data row0 col3 false \" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_classes_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_classes_row1_col0\" class=\"data row1 col0 false \" >19</td>\n",
+ " <td id=\"T_after_classes_row1_col1\" class=\"data row1 col1 true \" >439</td>\n",
+ " <td id=\"T_after_classes_row1_col2\" class=\"data row1 col2 false \" >6</td>\n",
+ " <td id=\"T_after_classes_row1_col3\" class=\"data row1 col3 true \" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s.set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
+ " .set_table_styles([{\n",
+ " 'selector': 'caption',\n",
+ " 'props': 'caption-side: bottom; font-size:1.25em;'\n",
+ " }], overwrite=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.362496Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.362327Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.367625Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.367082Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_caption td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_caption .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_caption th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_caption th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_caption th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_caption td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_caption th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_caption td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_caption .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_caption .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_caption caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_caption\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_caption_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_caption_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_caption_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_caption_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_caption_row0_col0\" class=\"data row0 col0 true \" >38</td>\n",
+ " <td id=\"T_after_caption_row0_col1\" class=\"data row0 col1 false \" >2</td>\n",
+ " <td id=\"T_after_caption_row0_col2\" class=\"data row0 col2 true \" >18</td>\n",
+ " <td id=\"T_after_caption_row0_col3\" class=\"data row0 col3 false \" >22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_caption_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_caption_row1_col0\" class=\"data row1 col0 false \" >19</td>\n",
+ " <td id=\"T_after_caption_row1_col1\" class=\"data row1 col1 true \" >439</td>\n",
+ " <td id=\"T_after_caption_row1_col2\" class=\"data row1 col2 false \" >6</td>\n",
+ " <td id=\"T_after_caption_row1_col3\" class=\"data row1 col3 true \" >452</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting\n",
+ "s.set_uuid('after_caption')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Adding tooltips (*since version 1.3.0*) can be done using the [.set_tooltips()][tooltips] method in the same way you can add CSS classes to data cells by providing a string based DataFrame with intersecting indices and columns. You don't have to specify a `css_class` name or any css `props` for the tooltips, since there are standard defaults, but the option is there if you want more visual control. \n",
+ "\n",
+ "[tooltips]: ../reference/api/pandas.io.formats.style.Styler.set_tooltips.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.369724Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.369555Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.377614Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.377168Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_caption td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_caption .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_caption th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_caption th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_caption th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_caption td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_caption th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_caption td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_caption .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_caption .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_caption caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "#T_after_caption .pd-t {\n",
+ " visibility: hidden;\n",
+ " position: absolute;\n",
+ " z-index: 1;\n",
+ " border: 1px solid #000066;\n",
+ " background-color: white;\n",
+ " color: #000066;\n",
+ " font-size: 0.8em;\n",
+ " transform: translate(0px, -24px);\n",
+ " padding: 0.6em;\n",
+ " border-radius: 0.5em;\n",
+ "}\n",
+ "#T_after_caption #T_after_caption_row0_col0:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_caption #T_after_caption_row0_col0 .pd-t::after {\n",
+ " content: \"This model has a very strong true positive rate\";\n",
+ "}\n",
+ "#T_after_caption #T_after_caption_row0_col3:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_caption #T_after_caption_row0_col3 .pd-t::after {\n",
+ " content: \"This model's total number of false negatives is too high\";\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_caption\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_caption_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_caption_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_caption_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_caption_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_caption_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_caption_row0_col0\" class=\"data row0 col0 true \" >38<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row0_col1\" class=\"data row0 col1 false \" >2<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row0_col2\" class=\"data row0 col2 true \" >18<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row0_col3\" class=\"data row0 col3 false \" >22<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_caption_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_caption_row1_col0\" class=\"data row1 col0 false \" >19<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row1_col1\" class=\"data row1 col1 true \" >439<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row1_col2\" class=\"data row1 col2 false \" >6<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_caption_row1_col3\" class=\"data row1 col3 true \" >452<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tt = pd.DataFrame([['This model has a very strong true positive rate', \n",
+ " \"This model's total number of false negatives is too high\"]], \n",
+ " index=['Tumour (Positive)'], columns=df.columns[[0,3]])\n",
+ "s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
+ " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
+ " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.379584Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.379385Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.386168Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.385769Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tooltips td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tooltips .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tooltips th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_tooltips th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_tooltips th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_tooltips td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_tooltips th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_tooltips td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_tooltips .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_tooltips .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_tooltips caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "#T_after_tooltips .pd-t {\n",
+ " visibility: hidden;\n",
+ " position: absolute;\n",
+ " z-index: 1;\n",
+ " border: 1px solid #000066;\n",
+ " background-color: white;\n",
+ " color: #000066;\n",
+ " font-size: 0.8em;\n",
+ " transform: translate(0px, -24px);\n",
+ " padding: 0.6em;\n",
+ " border-radius: 0.5em;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col0:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col0 .pd-t::after {\n",
+ " content: \"This model has a very strong true positive rate\";\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col3:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col3 .pd-t::after {\n",
+ " content: \"This model's total number of false negatives is too high\";\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tooltips\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tooltips_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tooltips_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tooltips_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tooltips_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tooltips_row0_col0\" class=\"data row0 col0 true \" >38<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col1\" class=\"data row0 col1 false \" >2<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col2\" class=\"data row0 col2 true \" >18<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col3\" class=\"data row0 col3 false \" >22<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tooltips_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tooltips_row1_col0\" class=\"data row1 col0 false \" >19<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col1\" class=\"data row1 col1 true \" >439<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col2\" class=\"data row1 col2 false \" >6<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col3\" class=\"data row1 col3 true \" >452<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+    "# Hidden cell to avoid CSS clashes and later code overriding previous formatting\n",
+ "s.set_uuid('after_tooltips')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The only thing left to do for our table is to add the highlighting borders to draw the audience attention to the tooltips. We will create internal CSS classes as before using table styles. **Setting classes always overwrites** so we need to make sure we add the previous classes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.388436Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.388245Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.398134Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.397636Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_tooltips td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_tooltips .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_tooltips th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_tooltips th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_tooltips th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_tooltips td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_tooltips th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_tooltips td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_tooltips .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_tooltips .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_tooltips caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "#T_after_tooltips .border-red {\n",
+ " border: 2px dashed red;\n",
+ "}\n",
+ "#T_after_tooltips .border-green {\n",
+ " border: 2px dashed green;\n",
+ "}\n",
+ "#T_after_tooltips .pd-t {\n",
+ " visibility: hidden;\n",
+ " position: absolute;\n",
+ " z-index: 1;\n",
+ " border: 1px solid #000066;\n",
+ " background-color: white;\n",
+ " color: #000066;\n",
+ " font-size: 0.8em;\n",
+ " transform: translate(0px, -24px);\n",
+ " padding: 0.6em;\n",
+ " border-radius: 0.5em;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col0:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col0 .pd-t::after {\n",
+ " content: \"This model has a very strong true positive rate\";\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col3:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_tooltips #T_after_tooltips_row0_col3 .pd-t::after {\n",
+ " content: \"This model's total number of false negatives is too high\";\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_tooltips\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_tooltips_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_tooltips_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_tooltips_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_tooltips_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tooltips_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_tooltips_row0_col0\" class=\"data row0 col0 true border-green \" >38<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col1\" class=\"data row0 col1 false \" >2<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col2\" class=\"data row0 col2 true \" >18<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row0_col3\" class=\"data row0 col3 false border-red \" >22<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_tooltips_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_tooltips_row1_col0\" class=\"data row1 col0 false \" >19<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col1\" class=\"data row1 col1 true \" >439<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col2\" class=\"data row1 col2 false \" >6<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_tooltips_row1_col3\" class=\"data row1 col3 true \" >452<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 34,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "s.set_table_styles([ # create internal CSS classes\n",
+ " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n",
+ " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n",
+ "], overwrite=False)\n",
+ "cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '], \n",
+ " [' ', ' ', ' ', ' ']], \n",
+ " index=df.index, \n",
+ " columns=df.columns[:4])\n",
+ "s.set_td_classes(cell_color + cell_border)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.400114Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.399865Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.406938Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.406425Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_after_borders td:hover {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_after_borders .index_name {\n",
+ " font-style: italic;\n",
+ " color: darkgrey;\n",
+ " font-weight: normal;\n",
+ "}\n",
+ "#T_after_borders th:not(.index_name) {\n",
+ " background-color: #000066;\n",
+ " color: white;\n",
+ "}\n",
+ "#T_after_borders th.col_heading {\n",
+ " text-align: center;\n",
+ "}\n",
+ "#T_after_borders th.col_heading.level0 {\n",
+ " font-size: 1.5em;\n",
+ "}\n",
+ "#T_after_borders td {\n",
+ " text-align: center;\n",
+ " font-weight: bold;\n",
+ "}\n",
+ "#T_after_borders th.col2 {\n",
+ " border-left: 1px solid white;\n",
+ "}\n",
+ "#T_after_borders td.col2 {\n",
+ " border-left: 1px solid #000066;\n",
+ "}\n",
+ "#T_after_borders .true {\n",
+ " background-color: #e6ffe6;\n",
+ "}\n",
+ "#T_after_borders .false {\n",
+ " background-color: #ffe6e6;\n",
+ "}\n",
+ "#T_after_borders caption {\n",
+ " caption-side: bottom;\n",
+ " font-size: 1.25em;\n",
+ "}\n",
+ "#T_after_borders .border-red {\n",
+ " border: 2px dashed red;\n",
+ "}\n",
+ "#T_after_borders .border-green {\n",
+ " border: 2px dashed green;\n",
+ "}\n",
+ "#T_after_borders .pd-t {\n",
+ " visibility: hidden;\n",
+ " position: absolute;\n",
+ " z-index: 1;\n",
+ " border: 1px solid #000066;\n",
+ " background-color: white;\n",
+ " color: #000066;\n",
+ " font-size: 0.8em;\n",
+ " transform: translate(0px, -24px);\n",
+ " padding: 0.6em;\n",
+ " border-radius: 0.5em;\n",
+ "}\n",
+ "#T_after_borders #T_after_borders_row0_col0:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_borders #T_after_borders_row0_col0 .pd-t::after {\n",
+ " content: \"This model has a very strong true positive rate\";\n",
+ "}\n",
+ "#T_after_borders #T_after_borders_row0_col3:hover .pd-t {\n",
+ " visibility: visible;\n",
+ "}\n",
+ "#T_after_borders #T_after_borders_row0_col3 .pd-t::after {\n",
+ " content: \"This model's total number of false negatives is too high\";\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_after_borders\" class=\"my-table-cls\">\n",
+ " <caption>Confusion matrix for multiple cancer prediction models.</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Model:</th>\n",
+ " <th id=\"T_after_borders_level0_col0\" class=\"col_heading level0 col0\" colspan=\"2\">Decision Tree</th>\n",
+ " <th id=\"T_after_borders_level0_col2\" class=\"col_heading level0 col2\" colspan=\"2\">Regression</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level1\" >Predicted:</th>\n",
+ " <th id=\"T_after_borders_level1_col0\" class=\"col_heading level1 col0\" >Tumour</th>\n",
+ " <th id=\"T_after_borders_level1_col1\" class=\"col_heading level1 col1\" >Non-Tumour</th>\n",
+ " <th id=\"T_after_borders_level1_col2\" class=\"col_heading level1 col2\" >Tumour</th>\n",
+ " <th id=\"T_after_borders_level1_col3\" class=\"col_heading level1 col3\" >Non-Tumour</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\"index_name level0\" >Actual Label:</th>\n",
+ " <th class=\"blank col0\" > </th>\n",
+ " <th class=\"blank col1\" > </th>\n",
+ " <th class=\"blank col2\" > </th>\n",
+ " <th class=\"blank col3\" > </th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_after_borders_level0_row0\" class=\"row_heading level0 row0\" >Tumour (Positive)</th>\n",
+ " <td id=\"T_after_borders_row0_col0\" class=\"data row0 col0 true border-green \" >38<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row0_col1\" class=\"data row0 col1 false \" >2<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row0_col2\" class=\"data row0 col2 true \" >18<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row0_col3\" class=\"data row0 col3 false border-red \" >22<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_after_borders_level0_row1\" class=\"row_heading level0 row1\" >Non-Tumour (Negative)</th>\n",
+ " <td id=\"T_after_borders_row1_col0\" class=\"data row1 col0 false \" >19<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row1_col1\" class=\"data row1 col1 true \" >439<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row1_col2\" class=\"data row1 col2 false \" >6<span class=\"pd-t\"></span></td>\n",
+ " <td id=\"T_after_borders_row1_col3\" class=\"data row1 col3 true \" >452<span class=\"pd-t\"></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf3d0>"
+ ]
+ },
+ "execution_count": 35,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_borders')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Finer Control with Slicing\n",
+ "\n",
+ "The examples we have shown so far for the `Styler.apply` and `Styler.map` functions have not demonstrated the use of the ``subset`` argument. This is a useful argument which permits a lot of flexibility: it allows you to apply styles to specific rows or columns, without having to code that logic into your `style` function.\n",
+ "\n",
+ "The value passed to `subset` behaves similar to slicing a DataFrame;\n",
+ "\n",
+ "- A scalar is treated as a column label\n",
+ "- A list (or Series or NumPy array) is treated as multiple column labels\n",
+ "- A tuple is treated as `(row_indexer, column_indexer)`\n",
+ "\n",
+ "Consider using `pd.IndexSlice` to construct the tuple for the last one. We will create a MultiIndexed DataFrame to demonstrate the functionality."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.409173Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.408951Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.417055Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.416627Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th>c1</th>\n",
+ " <th>c2</th>\n",
+ " <th>c3</th>\n",
+ " <th>c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th rowspan=\"2\" valign=\"top\">A</th>\n",
+ " <th>r1</th>\n",
+ " <td>-1.048553</td>\n",
+ " <td>-1.420018</td>\n",
+ " <td>-1.706270</td>\n",
+ " <td>1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>r2</th>\n",
+ " <td>-0.509652</td>\n",
+ " <td>-0.438074</td>\n",
+ " <td>-1.252795</td>\n",
+ " <td>0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th rowspan=\"2\" valign=\"top\">B</th>\n",
+ " <th>r1</th>\n",
+ " <td>-1.613898</td>\n",
+ " <td>-0.212740</td>\n",
+ " <td>-0.895467</td>\n",
+ " <td>0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>r2</th>\n",
+ " <td>-0.510805</td>\n",
+ " <td>-1.180632</td>\n",
+ " <td>-0.028182</td>\n",
+ " <td>0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " c1 c2 c3 c4\n",
+ "A r1 -1.048553 -1.420018 -1.706270 1.950775\n",
+ " r2 -0.509652 -0.438074 -1.252795 0.777490\n",
+ "B r1 -1.613898 -0.212740 -0.895467 0.386902\n",
+ " r2 -0.510805 -1.180632 -0.028182 0.428332"
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df3 = pd.DataFrame(np.random.randn(4,4), \n",
+ " pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),\n",
+ " columns=['c1','c2','c3','c4'])\n",
+ "df3"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will use subset to highlight the maximum in the third and fourth columns with red text. We will highlight the subset sliced region in yellow."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.418964Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.418798Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.426616Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.426104Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_e2d84_row0_col2, #T_e2d84_row1_col2, #T_e2d84_row1_col3, #T_e2d84_row2_col2, #T_e2d84_row2_col3, #T_e2d84_row3_col3 {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_e2d84_row0_col3, #T_e2d84_row3_col2 {\n",
+ " color: red;\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_e2d84\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_e2d84_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_e2d84_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " <th id=\"T_e2d84_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ " <th id=\"T_e2d84_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_e2d84_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ " <th id=\"T_e2d84_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ " <td id=\"T_e2d84_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ " <td id=\"T_e2d84_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ " <td id=\"T_e2d84_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ " <td id=\"T_e2d84_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e2d84_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ " <td id=\"T_e2d84_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ " <td id=\"T_e2d84_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ " <td id=\"T_e2d84_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ " <td id=\"T_e2d84_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e2d84_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ " <th id=\"T_e2d84_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ " <td id=\"T_e2d84_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ " <td id=\"T_e2d84_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ " <td id=\"T_e2d84_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ " <td id=\"T_e2d84_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e2d84_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ " <td id=\"T_e2d84_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ " <td id=\"T_e2d84_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ " <td id=\"T_e2d84_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ " <td id=\"T_e2d84_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabf70>"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "slice_ = ['c3', 'c4']\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If combined with the ``IndexSlice`` as suggested then it can index across both dimensions with greater flexibility."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.428679Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.428473Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.437510Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.436947Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_961b7_row0_col1, #T_961b7_row0_col2, #T_961b7_row2_col3 {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_961b7_row0_col3, #T_961b7_row2_col1, #T_961b7_row2_col2 {\n",
+ " color: red;\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_961b7\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_961b7_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_961b7_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " <th id=\"T_961b7_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ " <th id=\"T_961b7_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_961b7_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ " <th id=\"T_961b7_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ " <td id=\"T_961b7_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ " <td id=\"T_961b7_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ " <td id=\"T_961b7_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ " <td id=\"T_961b7_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_961b7_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ " <td id=\"T_961b7_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ " <td id=\"T_961b7_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ " <td id=\"T_961b7_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ " <td id=\"T_961b7_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_961b7_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ " <th id=\"T_961b7_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ " <td id=\"T_961b7_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ " <td id=\"T_961b7_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ " <td id=\"T_961b7_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ " <td id=\"T_961b7_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_961b7_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ " <td id=\"T_961b7_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ " <td id=\"T_961b7_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ " <td id=\"T_961b7_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ " <td id=\"T_961b7_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabca0>"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "idx = pd.IndexSlice\n",
+ "slice_ = idx[idx[:,'r1'], idx['c2':'c4']]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This also provides the flexibility to sub select rows when used with the `axis=1`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.439704Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.439513Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.447770Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.447225Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_67cc4_row1_col0, #T_67cc4_row1_col1, #T_67cc4_row1_col2, #T_67cc4_row3_col0, #T_67cc4_row3_col1, #T_67cc4_row3_col2 {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_67cc4_row1_col3, #T_67cc4_row3_col3 {\n",
+ " color: red;\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_67cc4\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_67cc4_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_67cc4_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " <th id=\"T_67cc4_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ " <th id=\"T_67cc4_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_67cc4_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ " <th id=\"T_67cc4_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ " <td id=\"T_67cc4_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ " <td id=\"T_67cc4_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ " <td id=\"T_67cc4_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ " <td id=\"T_67cc4_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_67cc4_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ " <td id=\"T_67cc4_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ " <td id=\"T_67cc4_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ " <td id=\"T_67cc4_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ " <td id=\"T_67cc4_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_67cc4_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ " <th id=\"T_67cc4_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ " <td id=\"T_67cc4_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ " <td id=\"T_67cc4_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ " <td id=\"T_67cc4_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ " <td id=\"T_67cc4_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_67cc4_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ " <td id=\"T_67cc4_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ " <td id=\"T_67cc4_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ " <td id=\"T_67cc4_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ " <td id=\"T_67cc4_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaf610>"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "slice_ = idx[idx[:,'r2'], :]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There is also scope to provide **conditional filtering**. \n",
+ "\n",
+ "Suppose we want to highlight the maximum across columns 2 and 4 only in the case that the sum of columns 1 and 3 is less than -2.0 *(essentially excluding rows* `(:,'r2')`*)*."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.449966Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.449671Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.458552Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.458014Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_000dd_row0_col1, #T_000dd_row2_col1 {\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "#T_000dd_row0_col3, #T_000dd_row2_col3 {\n",
+ " color: red;\n",
+ " background-color: #ffffb3;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_000dd\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_000dd_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_000dd_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " <th id=\"T_000dd_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ " <th id=\"T_000dd_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_000dd_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ " <th id=\"T_000dd_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ " <td id=\"T_000dd_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ " <td id=\"T_000dd_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ " <td id=\"T_000dd_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ " <td id=\"T_000dd_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_000dd_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ " <td id=\"T_000dd_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ " <td id=\"T_000dd_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ " <td id=\"T_000dd_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ " <td id=\"T_000dd_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_000dd_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ " <th id=\"T_000dd_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ " <td id=\"T_000dd_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ " <td id=\"T_000dd_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ " <td id=\"T_000dd_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ " <td id=\"T_000dd_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_000dd_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ " <td id=\"T_000dd_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ " <td id=\"T_000dd_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ " <td id=\"T_000dd_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ " <td id=\"T_000dd_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaec20>"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Only label-based slicing is supported right now, not positional, and not callables.\n",
+ "\n",
+ "If your style function uses a `subset` or `axis` keyword argument, consider wrapping your function in a `functools.partial`, partialing out that keyword.\n",
+ "\n",
+ "```python\n",
+ "my_func2 = functools.partial(my_func, subset=42)\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Optimization\n",
+ "\n",
+ "Generally, for smaller tables and most cases, the rendered HTML does not need to be optimized, and we don't really recommend it. There are two cases where it is worth considering:\n",
+ "\n",
+ " - If you are rendering and styling a very large HTML table, certain browsers have performance issues.\n",
+ " - If you are using ``Styler`` to dynamically create part of online user interfaces and want to improve network performance.\n",
+ " \n",
+ "Here we recommend the following steps to implement:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 1. Remove UUID and cell_ids\n",
+ "\n",
+ "Ignore the `uuid` and set `cell_ids` to `False`. This will prevent unnecessary HTML."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-warning\">\n",
+ "\n",
+ "<font color=red>This is sub-optimal:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.460828Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.460614Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.463438Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.463034Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "df4 = pd.DataFrame([[1,2],[3,4]])\n",
+ "s4 = df4.style"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-info\">\n",
+ "\n",
+ "<font color=green>This is better:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.465505Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.465337Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.467912Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.467476Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "from pandas.io.formats.style import Styler\n",
+ "s4 = Styler(df4, uuid_len=0, cell_ids=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 2. Use table styles\n",
+ "\n",
+ "Use table styles where possible (e.g. for all cells or rows or columns at a time) since the CSS is nearly always more efficient than other formats."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-warning\">\n",
+ "\n",
+ "<font color=red>This is sub-optimal:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.469874Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.469709Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.475549Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.475108Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_663ed_row0_col1, #T_663ed_row1_col1 {\n",
+ " font-family: \"Times New Roman\", Times, serif;\n",
+ " color: #e83e8c;\n",
+ " font-size: 1.3em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_663ed\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_663ed_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_663ed_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_663ed_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_663ed_row0_col0\" class=\"data row0 col0\" >1</td>\n",
+ " <td id=\"T_663ed_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_663ed_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_663ed_row1_col0\" class=\"data row1 col0\" >3</td>\n",
+ " <td id=\"T_663ed_row1_col1\" class=\"data row1 col1\" >4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adaeaa0>"
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "props = 'font-family: \"Times New Roman\", Times, serif; color: #e83e8c; font-size:1.3em;'\n",
+ "df4.style.map(lambda x: props, subset=[1])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-info\">\n",
+ "\n",
+ "<font color=green>This is better:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.477636Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.477432Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.481775Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.481309Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_083a3 td.col1 {\n",
+ " font-family: \"Times New Roman\", Times, serif;\n",
+ " color: #e83e8c;\n",
+ " font-size: 1.3em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_083a3\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_083a3_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_083a3_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_083a3_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_083a3_row0_col0\" class=\"data row0 col0\" >1</td>\n",
+ " <td id=\"T_083a3_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_083a3_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_083a3_row1_col0\" class=\"data row1 col0\" >3</td>\n",
+ " <td id=\"T_083a3_row1_col1\" class=\"data row1 col1\" >4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adac1f0>"
+ ]
+ },
+ "execution_count": 44,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3. Set classes instead of using Styler functions\n",
+ "\n",
+ "For large DataFrames where the same style is applied to many cells it can be more efficient to declare the styles as classes and then apply those classes to data cells, rather than directly applying styles to cells. It is, however, probably still easier to use the Styler function api when you are not concerned about optimization."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-warning\">\n",
+ "\n",
+ "<font color=red>This is sub-optimal:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.483913Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.483746Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.493460Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.492980Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_5b3ba_row0_col2, #T_5b3ba_row7_col1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_5b3ba_row0_col3 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_5b3ba_row1_col0, #T_5b3ba_row2_col3, #T_5b3ba_row3_col0, #T_5b3ba_row4_col0, #T_5b3ba_row5_col2, #T_5b3ba_row7_col0, #T_5b3ba_row8_col3, #T_5b3ba_row9_col0 {\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_5b3ba_row6_col0 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_5b3ba\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_5b3ba_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_5b3ba_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_5b3ba_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_5b3ba_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_5b3ba_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_5b3ba_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_5b3ba_row0_col2\" class=\"data row0 col2\" >0.978738</td>\n",
+ " <td id=\"T_5b3ba_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_5b3ba_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_5b3ba_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_5b3ba_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_5b3ba_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_5b3ba_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_5b3ba_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_5b3ba_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_5b3ba_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_5b3ba_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_5b3ba_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_5b3ba_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_5b3ba_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_5b3ba_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_5b3ba_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_5b3ba_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_5b3ba_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_5b3ba_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_5b3ba_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_5b3ba_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_5b3ba_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_5b3ba_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_5b3ba_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_5b3ba_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_5b3ba_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_5b3ba_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_5b3ba_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_5b3ba_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_5b3ba_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_5b3ba_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_5b3ba_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_5b3ba_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_5b3ba_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_5b3ba_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_5b3ba_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_5b3ba_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_5b3ba_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_5b3ba_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c17dabd60>"
+ ]
+ },
+ "execution_count": 45,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-info\">\n",
+ "\n",
+ "<font color=green>This is better:</font>\n",
+ "\n",
+ "</div>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.495587Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.495374Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.506111Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.505581Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_ec9f2 .cls-1 {\n",
+ " color: white;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "#T_ec9f2 .cls-2 {\n",
+ " color: white;\n",
+ " background-color: pink;\n",
+ "}\n",
+ "#T_ec9f2 .cls-3 {\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_ec9f2\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_ec9f2_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_ec9f2_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_ec9f2_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_ec9f2_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_ec9f2_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_ec9f2_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_ec9f2_row0_col2\" class=\"data row0 col2 cls-1 \" >0.978738</td>\n",
+ " <td id=\"T_ec9f2_row0_col3\" class=\"data row0 col3 cls-1 cls-2 \" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_ec9f2_row1_col0\" class=\"data row1 col0 cls-2 \" >1.867558</td>\n",
+ " <td id=\"T_ec9f2_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_ec9f2_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_ec9f2_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_ec9f2_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_ec9f2_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_ec9f2_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_ec9f2_row2_col3\" class=\"data row2 col3 cls-2 \" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_ec9f2_row3_col0\" class=\"data row3 col0 cls-2 \" >0.761038</td>\n",
+ " <td id=\"T_ec9f2_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_ec9f2_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_ec9f2_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_ec9f2_row4_col0\" class=\"data row4 col0 cls-2 \" >1.494079</td>\n",
+ " <td id=\"T_ec9f2_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_ec9f2_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_ec9f2_row4_col3\" class=\"data row4 col3\" >-0.854096</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_ec9f2_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_ec9f2_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_ec9f2_row5_col2\" class=\"data row5 col2 cls-2 \" >0.864436</td>\n",
+ " <td id=\"T_ec9f2_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_ec9f2_row6_col0\" class=\"data row6 col0 cls-1 cls-2 cls-3 \" >2.269755</td>\n",
+ " <td id=\"T_ec9f2_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_ec9f2_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_ec9f2_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_ec9f2_row7_col0\" class=\"data row7 col0 cls-2 \" >1.532779</td>\n",
+ " <td id=\"T_ec9f2_row7_col1\" class=\"data row7 col1 cls-1 \" >1.469359</td>\n",
+ " <td id=\"T_ec9f2_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_ec9f2_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_ec9f2_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_ec9f2_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_ec9f2_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_ec9f2_row8_col3\" class=\"data row8 col3 cls-2 \" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ec9f2_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_ec9f2_row9_col0\" class=\"data row9 col0 cls-2 \" >1.230291</td>\n",
+ " <td id=\"T_ec9f2_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_ec9f2_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_ec9f2_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbc0d0>"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)\n",
+ "cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))\n",
+ "cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)\n",
+ "cls3 = build(highlight_max(df2, props='cls-3 '))\n",
+ "df2.style.set_table_styles([\n",
+ " {'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},\n",
+ " {'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},\n",
+ " {'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}\n",
+ "]).set_td_classes(cls1 + cls2 + cls3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 4. Don't use tooltips\n",
+ "\n",
+ "Tooltips require `cell_ids` to work and they generate extra HTML elements for *every* data cell."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 5. If every byte counts use string replacement\n",
+ "\n",
+ "You can remove unnecessary HTML, or shorten the default class names by replacing the default css dict. You can read a little more about CSS [below](#More-About-CSS-and-HTML)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.508717Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.508287Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.512884Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.512364Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<style type=\"text/css\">\n",
+ "#T_ td {\n",
+ " font-family: \"Times New Roman\", Times, serif;\n",
+ " color: #e83e8c;\n",
+ " font-size: 1.3em;\n",
+ "}\n",
+ "#T_ .c1 {\n",
+ " color: green;\n",
+ "}\n",
+ "#T_ .l0 {\n",
+ " color: blue;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\" l0\" > </th>\n",
+ " <th class=\" l0 c0\" >0</th>\n",
+ " <th class=\" l0 c1\" >1</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th class=\" l0 r0\" >0</th>\n",
+ " <td class=\" r0 c0\" >1</td>\n",
+ " <td class=\" r0 c1\" >2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\" l0 r1\" >1</th>\n",
+ " <td class=\" r1 c0\" >3</td>\n",
+ " <td class=\" r1 c1\" >4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "my_css = {\n",
+ " \"row_heading\": \"\",\n",
+ " \"col_heading\": \"\",\n",
+ " \"index_name\": \"\",\n",
+ " \"col\": \"c\",\n",
+ " \"row\": \"r\",\n",
+ " \"col_trim\": \"\",\n",
+ " \"row_trim\": \"\",\n",
+ " \"level\": \"l\",\n",
+ " \"data\": \"\",\n",
+ " \"blank\": \"\",\n",
+ "}\n",
+ "html = Styler(df4, uuid_len=0, cell_ids=False)\n",
+ "html.set_table_styles([{'selector': 'td', 'props': props},\n",
+ " {'selector': '.c1', 'props': 'color:green;'},\n",
+ " {'selector': '.l0', 'props': 'color:blue;'}],\n",
+ " css_class_names=my_css)\n",
+ "print(html.to_html())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.514881Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.514692Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.518629Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.518240Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_ td {\n",
+ " font-family: \"Times New Roman\", Times, serif;\n",
+ " color: #e83e8c;\n",
+ " font-size: 1.3em;\n",
+ "}\n",
+ "#T_ .c1 {\n",
+ " color: green;\n",
+ "}\n",
+ "#T_ .l0 {\n",
+ " color: blue;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\" l0\" > </th>\n",
+ " <th class=\" l0 c0\" >0</th>\n",
+ " <th class=\" l0 c1\" >1</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th class=\" l0 r0\" >0</th>\n",
+ " <td class=\" r0 c0\" >1</td>\n",
+ " <td class=\" r0 c1\" >2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th class=\" l0 r1\" >1</th>\n",
+ " <td class=\" r1 c0\" >3</td>\n",
+ " <td class=\" r1 c1\" >4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbf760>"
+ ]
+ },
+ "execution_count": 48,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "html"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Builtin Styles"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Some styling functions are common enough that we've \"built them in\" to the `Styler`, so you don't have to write them and apply them yourself. The current list of such functions is:\n",
+ "\n",
+ " - [.highlight_null][nullfunc]: for use with identifying missing data. \n",
+     "  - [.highlight_min][minfunc] and [.highlight_max][maxfunc]: for use with identifying extremities in data.\n",
+ " - [.highlight_between][betweenfunc] and [.highlight_quantile][quantilefunc]: for use with identifying classes within data.\n",
+ " - [.background_gradient][bgfunc]: a flexible method for highlighting cells based on their, or other, values on a numeric scale.\n",
+ " - [.text_gradient][textfunc]: similar method for highlighting text based on their, or other, values on a numeric scale.\n",
+ " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n",
+ " \n",
+ "The individual documentation on each function often gives more examples of their arguments.\n",
+ "\n",
+ "[nullfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_null.rst\n",
+ "[minfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_min.rst\n",
+ "[maxfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_max.rst\n",
+ "[betweenfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_between.rst\n",
+ "[quantilefunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_quantile.rst\n",
+ "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n",
+ "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst\n",
+ "[barfunc]: ../reference/api/pandas.io.formats.style.Styler.bar.rst"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Null"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.521079Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.520607Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.527446Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.526886Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_c4aeb_row0_col2, #T_c4aeb_row4_col3 {\n",
+ " background-color: yellow;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_c4aeb\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_c4aeb_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_c4aeb_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_c4aeb_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_c4aeb_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_c4aeb_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_c4aeb_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_c4aeb_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_c4aeb_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_c4aeb_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_c4aeb_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_c4aeb_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_c4aeb_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_c4aeb_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_c4aeb_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_c4aeb_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_c4aeb_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_c4aeb_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_c4aeb_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_c4aeb_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_c4aeb_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_c4aeb_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_c4aeb_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_c4aeb_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_c4aeb_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_c4aeb_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_c4aeb_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_c4aeb_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_c4aeb_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_c4aeb_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbd6c0>"
+ ]
+ },
+ "execution_count": 49,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.iloc[0,2] = np.nan\n",
+ "df2.iloc[4,3] = np.nan\n",
+ "df2.loc[:4].style.highlight_null(color='yellow')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Min or Max"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.529411Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.529211Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.536717Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.536311Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_48903_row0_col3, #T_48903_row1_col0, #T_48903_row2_col3, #T_48903_row3_col0, #T_48903_row4_col0 {\n",
+ " color: white;\n",
+ " font-weight: bold;\n",
+ " background-color: darkblue;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_48903\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_48903_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_48903_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_48903_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_48903_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_48903_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_48903_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_48903_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_48903_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_48903_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_48903_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_48903_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_48903_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_48903_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_48903_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_48903_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_48903_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_48903_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_48903_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_48903_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_48903_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_48903_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_48903_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_48903_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_48903_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_48903_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_48903_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_48903_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_48903_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_48903_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbc910>"
+ ]
+ },
+ "execution_count": 50,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Between\n",
+ "This method accepts ranges as float, or NumPy arrays or Series provided the indexes match."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.538989Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.538790Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.549313Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.548893Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_0d827_row0_col1, #T_0d827_row2_col1, #T_0d827_row2_col3, #T_0d827_row3_col1, #T_0d827_row4_col0 {\n",
+ " color: white;\n",
+ " background-color: purple;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_0d827\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_0d827_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_0d827_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_0d827_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_0d827_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_0d827_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_0d827_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_0d827_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_0d827_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_0d827_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_0d827_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_0d827_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_0d827_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_0d827_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_0d827_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_0d827_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_0d827_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_0d827_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_0d827_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_0d827_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_0d827_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_0d827_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_0d827_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_0d827_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_0d827_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_0d827_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_0d827_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_0d827_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_0d827_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_0d827_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbe080>"
+ ]
+ },
+ "execution_count": 51,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "left = pd.Series([1.0, 0.0, 1.0], index=[\"A\", \"B\", \"D\"])\n",
+ "df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Quantile\n",
+ "Useful for detecting the highest or lowest percentile values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.551225Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.551057Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.558989Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.558362Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_167df_row0_col0, #T_167df_row0_col3, #T_167df_row1_col0 {\n",
+ " background-color: yellow;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_167df\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_167df_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_167df_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_167df_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_167df_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_167df_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_167df_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_167df_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_167df_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_167df_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_167df_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_167df_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_167df_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_167df_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_167df_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_167df_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_167df_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_167df_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_167df_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_167df_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_167df_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_167df_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_167df_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_167df_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_167df_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_167df_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_167df_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_167df_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_167df_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_167df_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbd180>"
+ ]
+ },
+ "execution_count": 52,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Background Gradient and Text Gradient"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can create \"heatmaps\" with the `background_gradient` and `text_gradient` methods. These require matplotlib, and we'll use [Seaborn](http://seaborn.pydata.org/) to get a nice colormap."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.561211Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.560996Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.912596Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.912120Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_9c22a_row0_col0 {\n",
+ " background-color: #188c18;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row0_col1 {\n",
+ " background-color: #49a449;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row0_col2, #T_9c22a_row4_col3 {\n",
+ " background-color: #000000;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row0_col3, #T_9c22a_row1_col2, #T_9c22a_row6_col0, #T_9c22a_row7_col1 {\n",
+ " background-color: #008000;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row1_col0 {\n",
+ " background-color: #138913;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row1_col1 {\n",
+ " background-color: #a6d2a6;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row1_col3 {\n",
+ " background-color: #bddcbd;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row2_col0 {\n",
+ " background-color: #73b873;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row2_col1 {\n",
+ " background-color: #48a348;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row2_col2 {\n",
+ " background-color: #8ec58e;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row2_col3 {\n",
+ " background-color: #3e9e3e;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row3_col0 {\n",
+ " background-color: #4aa44a;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row3_col1 {\n",
+ " background-color: #5bad5b;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row3_col2 {\n",
+ " background-color: #58ab58;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row3_col3 {\n",
+ " background-color: #96c996;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row4_col0 {\n",
+ " background-color: #269226;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row4_col1 {\n",
+ " background-color: #72b872;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row4_col2 {\n",
+ " background-color: #6fb76f;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row5_col0, #T_9c22a_row5_col3, #T_9c22a_row8_col1, #T_9c22a_row9_col2 {\n",
+ " background-color: #ebf3eb;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row5_col1 {\n",
+ " background-color: #379b37;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row5_col2 {\n",
+ " background-color: #0f870f;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row6_col1 {\n",
+ " background-color: #c7e1c7;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row6_col2 {\n",
+ " background-color: #9fce9f;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row6_col3 {\n",
+ " background-color: #bfdebf;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row7_col0 {\n",
+ " background-color: #249224;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row7_col2 {\n",
+ " background-color: #8cc58c;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row7_col3 {\n",
+ " background-color: #92c892;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row8_col0 {\n",
+ " background-color: #9acb9a;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row8_col2 {\n",
+ " background-color: #e4f0e4;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row8_col3 {\n",
+ " background-color: #a4d0a4;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_9c22a_row9_col0 {\n",
+ " background-color: #339933;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row9_col1 {\n",
+ " background-color: #118911;\n",
+ " color: #f1f1f1;\n",
+ "}\n",
+ "#T_9c22a_row9_col3 {\n",
+ " background-color: #c9e2c8;\n",
+ " color: #000000;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_9c22a\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_9c22a_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_9c22a_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_9c22a_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_9c22a_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_9c22a_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_9c22a_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_9c22a_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_9c22a_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_9c22a_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_9c22a_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_9c22a_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_9c22a_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_9c22a_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_9c22a_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_9c22a_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_9c22a_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_9c22a_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_9c22a_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_9c22a_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_9c22a_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_9c22a_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_9c22a_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_9c22a_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_9c22a_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_9c22a_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_9c22a_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_9c22a_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_9c22a_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_9c22a_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_9c22a_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_9c22a_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_9c22a_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_9c22a_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_9c22a_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_9c22a_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_9c22a_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_9c22a_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_9c22a_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_9c22a_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_9c22a_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_9c22a_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_9c22a_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_9c22a_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_9c22a_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_9c22a_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbf820>"
+ ]
+ },
+ "execution_count": 53,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import seaborn as sns\n",
+ "cm = sns.light_palette(\"green\", as_cmap=True)\n",
+ "\n",
+ "df2.style.background_gradient(cmap=cm)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.914782Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.914395Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.923462Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.922900Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_ab901_row0_col0 {\n",
+ " color: #188c18;\n",
+ "}\n",
+ "#T_ab901_row0_col1 {\n",
+ " color: #49a449;\n",
+ "}\n",
+ "#T_ab901_row0_col2, #T_ab901_row4_col3 {\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_ab901_row0_col3, #T_ab901_row1_col2, #T_ab901_row6_col0, #T_ab901_row7_col1 {\n",
+ " color: #008000;\n",
+ "}\n",
+ "#T_ab901_row1_col0 {\n",
+ " color: #138913;\n",
+ "}\n",
+ "#T_ab901_row1_col1 {\n",
+ " color: #a6d2a6;\n",
+ "}\n",
+ "#T_ab901_row1_col3 {\n",
+ " color: #bddcbd;\n",
+ "}\n",
+ "#T_ab901_row2_col0 {\n",
+ " color: #73b873;\n",
+ "}\n",
+ "#T_ab901_row2_col1 {\n",
+ " color: #48a348;\n",
+ "}\n",
+ "#T_ab901_row2_col2 {\n",
+ " color: #8ec58e;\n",
+ "}\n",
+ "#T_ab901_row2_col3 {\n",
+ " color: #3e9e3e;\n",
+ "}\n",
+ "#T_ab901_row3_col0 {\n",
+ " color: #4aa44a;\n",
+ "}\n",
+ "#T_ab901_row3_col1 {\n",
+ " color: #5bad5b;\n",
+ "}\n",
+ "#T_ab901_row3_col2 {\n",
+ " color: #58ab58;\n",
+ "}\n",
+ "#T_ab901_row3_col3 {\n",
+ " color: #96c996;\n",
+ "}\n",
+ "#T_ab901_row4_col0 {\n",
+ " color: #269226;\n",
+ "}\n",
+ "#T_ab901_row4_col1 {\n",
+ " color: #72b872;\n",
+ "}\n",
+ "#T_ab901_row4_col2 {\n",
+ " color: #6fb76f;\n",
+ "}\n",
+ "#T_ab901_row5_col0, #T_ab901_row5_col3, #T_ab901_row8_col1, #T_ab901_row9_col2 {\n",
+ " color: #ebf3eb;\n",
+ "}\n",
+ "#T_ab901_row5_col1 {\n",
+ " color: #379b37;\n",
+ "}\n",
+ "#T_ab901_row5_col2 {\n",
+ " color: #0f870f;\n",
+ "}\n",
+ "#T_ab901_row6_col1 {\n",
+ " color: #c7e1c7;\n",
+ "}\n",
+ "#T_ab901_row6_col2 {\n",
+ " color: #9fce9f;\n",
+ "}\n",
+ "#T_ab901_row6_col3 {\n",
+ " color: #bfdebf;\n",
+ "}\n",
+ "#T_ab901_row7_col0 {\n",
+ " color: #249224;\n",
+ "}\n",
+ "#T_ab901_row7_col2 {\n",
+ " color: #8cc58c;\n",
+ "}\n",
+ "#T_ab901_row7_col3 {\n",
+ " color: #92c892;\n",
+ "}\n",
+ "#T_ab901_row8_col0 {\n",
+ " color: #9acb9a;\n",
+ "}\n",
+ "#T_ab901_row8_col2 {\n",
+ " color: #e4f0e4;\n",
+ "}\n",
+ "#T_ab901_row8_col3 {\n",
+ " color: #a4d0a4;\n",
+ "}\n",
+ "#T_ab901_row9_col0 {\n",
+ " color: #339933;\n",
+ "}\n",
+ "#T_ab901_row9_col1 {\n",
+ " color: #118911;\n",
+ "}\n",
+ "#T_ab901_row9_col3 {\n",
+ " color: #c9e2c8;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_ab901\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_ab901_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_ab901_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_ab901_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_ab901_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_ab901_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_ab901_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_ab901_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_ab901_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_ab901_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_ab901_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_ab901_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_ab901_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_ab901_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_ab901_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_ab901_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_ab901_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_ab901_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_ab901_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_ab901_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_ab901_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_ab901_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_ab901_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_ab901_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_ab901_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_ab901_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_ab901_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_ab901_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_ab901_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_ab901_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_ab901_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_ab901_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_ab901_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_ab901_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_ab901_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_ab901_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_ab901_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_ab901_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_ab901_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_ab901_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_ab901_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_ab901_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_ab901_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_ab901_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_ab901_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_ab901_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbf7c0>"
+ ]
+ },
+ "execution_count": 54,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.style.text_gradient(cmap=cm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[.background_gradient][bgfunc] and [.text_gradient][textfunc] have a number of keyword arguments to customise the gradients and colors. See the documentation.\n",
+ "\n",
+ "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n",
+ "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Set properties\n",
+ "\n",
+ "Use `Styler.set_properties` when the style doesn't actually depend on the values. This is just a simple wrapper for `.map` where the function returns the same properties for all cells."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.926153Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.925690Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.932700Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.932155Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_20792_row0_col0, #T_20792_row0_col1, #T_20792_row0_col2, #T_20792_row0_col3, #T_20792_row1_col0, #T_20792_row1_col1, #T_20792_row1_col2, #T_20792_row1_col3, #T_20792_row2_col0, #T_20792_row2_col1, #T_20792_row2_col2, #T_20792_row2_col3, #T_20792_row3_col0, #T_20792_row3_col1, #T_20792_row3_col2, #T_20792_row3_col3, #T_20792_row4_col0, #T_20792_row4_col1, #T_20792_row4_col2, #T_20792_row4_col3 {\n",
+ " background-color: black;\n",
+ " color: lawngreen;\n",
+ " border-color: white;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_20792\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_20792_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_20792_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_20792_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_20792_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_20792_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_20792_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_20792_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_20792_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_20792_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_20792_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_20792_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_20792_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_20792_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_20792_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_20792_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_20792_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_20792_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_20792_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_20792_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_20792_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_20792_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_20792_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_20792_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_20792_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_20792_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_20792_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_20792_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_20792_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_20792_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe604d60>"
+ ]
+ },
+ "execution_count": 55,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.loc[:4].style.set_properties(**{'background-color': 'black',\n",
+ " 'color': 'lawngreen',\n",
+ " 'border-color': 'white'})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Bar charts"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can include \"bar charts\" in your DataFrame."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.934792Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.934584Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.942726Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.942083Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_45b07_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 89.5%, transparent 89.5%);\n",
+ "}\n",
+ "#T_45b07_row0_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 69.0%, transparent 69.0%);\n",
+ "}\n",
+ "#T_45b07_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 91.7%, transparent 91.7%);\n",
+ "}\n",
+ "#T_45b07_row1_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 29.1%, #d65f5f 29.1%, #d65f5f 57.4%, transparent 57.4%);\n",
+ "}\n",
+ "#T_45b07_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.8%, #d65f5f 50.8%, #d65f5f 52.9%, transparent 52.9%);\n",
+ "}\n",
+ "#T_45b07_row2_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 69.3%, transparent 69.3%);\n",
+ "}\n",
+ "#T_45b07_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 68.7%, transparent 68.7%);\n",
+ "}\n",
+ "#T_45b07_row3_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 60.9%, transparent 60.9%);\n",
+ "}\n",
+ "#T_45b07_row4_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 83.9%, transparent 83.9%);\n",
+ "}\n",
+ "#T_45b07_row4_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 51.5%, #d65f5f 51.5%, #d65f5f 57.4%, transparent 57.4%);\n",
+ "}\n",
+ "#T_45b07_row5_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 52.9%, transparent 52.9%);\n",
+ "}\n",
+ "#T_45b07_row5_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 76.4%, transparent 76.4%);\n",
+ "}\n",
+ "#T_45b07_row6_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_45b07_row6_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 15.3%, #d65f5f 15.3%, #d65f5f 57.4%, transparent 57.4%);\n",
+ "}\n",
+ "#T_45b07_row7_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 84.7%, transparent 84.7%);\n",
+ "}\n",
+ "#T_45b07_row7_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_45b07_row8_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 34.5%, #d65f5f 34.5%, #d65f5f 52.9%, transparent 52.9%);\n",
+ "}\n",
+ "#T_45b07_row8_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 57.4%, transparent 57.4%);\n",
+ "}\n",
+ "#T_45b07_row9_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 52.9%, #d65f5f 52.9%, #d65f5f 78.4%, transparent 78.4%);\n",
+ "}\n",
+ "#T_45b07_row9_col1 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 57.4%, #d65f5f 57.4%, #d65f5f 92.3%, transparent 92.3%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_45b07\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_45b07_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_45b07_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_45b07_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_45b07_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_45b07_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_45b07_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_45b07_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_45b07_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_45b07_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_45b07_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_45b07_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_45b07_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_45b07_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_45b07_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_45b07_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_45b07_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_45b07_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_45b07_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_45b07_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_45b07_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_45b07_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_45b07_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_45b07_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_45b07_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_45b07_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_45b07_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_45b07_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_45b07_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_45b07_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_45b07_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_45b07_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_45b07_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_45b07_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_45b07_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_45b07_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_45b07_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_45b07_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_45b07_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_45b07_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_45b07_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_45b07_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_45b07_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_45b07_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_45b07_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_45b07_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe6049a0>"
+ ]
+ },
+ "execution_count": 56,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.style.bar(subset=['A', 'B'], color='#d65f5f')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Additional keyword arguments give more control on centering and positioning, and you can pass a list of `[color_negative, color_positive]` to highlight lower and higher values or a matplotlib colormap.\n",
+ "\n",
+ "To showcase an example here's how you can change the above with the new `align` option, combined with setting `vmin` and `vmax` limits, the `width` of the figure, and underlying css `props` of cells, leaving space to display the text and the bars. We also use `text_gradient` to color the text the same as the bars using a matplotlib colormap (although in this case the visualization is probably better without this additional effect)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.945063Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.944819Z",
+ "iopub.status.idle": "2023-11-30T07:57:22.959242Z",
+ "shell.execute_reply": "2023-11-30T07:57:22.958632Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_e7107_row0_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff4a4a 30.0%, #ff4a4a 51.2%, transparent 51.2%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff4a4a;\n",
+ "}\n",
+ "#T_e7107_row0_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffd6d6 30.0%, #ffd6d6 34.8%, transparent 34.8%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffd6d6;\n",
+ "}\n",
+ "#T_e7107_row0_col2, #T_e7107_row4_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " color: #000000;\n",
+ "}\n",
+ "#T_e7107_row0_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff1a1a 30.0%, #ff1a1a 56.9%, transparent 56.9%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff1a1a;\n",
+ "}\n",
+ "#T_e7107_row1_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff4040 30.0%, #ff4040 52.4%, transparent 52.4%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff4040;\n",
+ "}\n",
+ "#T_e7107_row1_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 18.3%, #9a9aff 18.3%, #9a9aff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #9a9aff;\n",
+ "}\n",
+ "#T_e7107_row1_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff9e9e 30.0%, #ff9e9e 41.4%, transparent 41.4%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff9e9e;\n",
+ "}\n",
+ "#T_e7107_row1_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 28.2%, #f0f0ff 28.2%, #f0f0ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #f0f0ff;\n",
+ "}\n",
+ "#T_e7107_row2_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 28.8%, #f4f4ff 28.8%, #f4f4ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #f4f4ff;\n",
+ "}\n",
+ "#T_e7107_row2_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffd4d4 30.0%, #ffd4d4 34.9%, transparent 34.9%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffd4d4;\n",
+ "}\n",
+ "#T_e7107_row2_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #fff0f0 30.0%, #fff0f0 31.7%, transparent 31.7%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #fff0f0;\n",
+ "}\n",
+ "#T_e7107_row2_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff6a6a 30.0%, #ff6a6a 47.5%, transparent 47.5%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff6a6a;\n",
+ "}\n",
+ "#T_e7107_row3_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffb2b2 30.0%, #ffb2b2 39.1%, transparent 39.1%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffb2b2;\n",
+ "}\n",
+ "#T_e7107_row3_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #fff2f2 30.0%, #fff2f2 31.5%, transparent 31.5%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #fff2f2;\n",
+ "}\n",
+ "#T_e7107_row3_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffd2d2 30.0%, #ffd2d2 35.3%, transparent 35.3%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffd2d2;\n",
+ "}\n",
+ "#T_e7107_row3_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffdcdc 30.0%, #ffdcdc 34.0%, transparent 34.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffdcdc;\n",
+ "}\n",
+ "#T_e7107_row4_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff6666 30.0%, #ff6666 47.9%, transparent 47.9%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff6666;\n",
+ "}\n",
+ "#T_e7107_row4_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 27.5%, #eaeaff 27.5%, #eaeaff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #eaeaff;\n",
+ "}\n",
+ "#T_e7107_row4_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffdede 30.0%, #ffdede 33.8%, transparent 33.8%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffdede;\n",
+ "}\n",
+ "#T_e7107_row5_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, #0000ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #0000ff;\n",
+ "}\n",
+ "#T_e7107_row5_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffbcbc 30.0%, #ffbcbc 37.8%, transparent 37.8%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffbcbc;\n",
+ "}\n",
+ "#T_e7107_row5_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffa6a6 30.0%, #ffa6a6 40.4%, transparent 40.4%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffa6a6;\n",
+ "}\n",
+ "#T_e7107_row5_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 21.1%, #b4b4ff 21.1%, #b4b4ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #b4b4ff;\n",
+ "}\n",
+ "#T_e7107_row6_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff1616 30.0%, #ff1616 57.2%, transparent 57.2%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff1616;\n",
+ "}\n",
+ "#T_e7107_row6_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 12.5%, #6a6aff 12.5%, #6a6aff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #6a6aff;\n",
+ "}\n",
+ "#T_e7107_row6_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #fffafa 30.0%, #fffafa 30.5%, transparent 30.5%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #fffafa;\n",
+ "}\n",
+ "#T_e7107_row6_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 27.8%, #ececff 27.8%, #ececff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ececff;\n",
+ "}\n",
+ "#T_e7107_row7_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff6262 30.0%, #ff6262 48.4%, transparent 48.4%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff6262;\n",
+ "}\n",
+ "#T_e7107_row7_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff6868 30.0%, #ff6868 47.6%, transparent 47.6%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff6868;\n",
+ "}\n",
+ "#T_e7107_row7_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #fff0f0 30.0%, #fff0f0 31.9%, transparent 31.9%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #fff0f0;\n",
+ "}\n",
+ "#T_e7107_row7_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffd8d8 30.0%, #ffd8d8 34.5%, transparent 34.5%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffd8d8;\n",
+ "}\n",
+ "#T_e7107_row8_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 19.3%, #a4a4ff 19.3%, #a4a4ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #a4a4ff;\n",
+ "}\n",
+ "#T_e7107_row8_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 6.2%, #3434ff 6.2%, #3434ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #3434ff;\n",
+ "}\n",
+ "#T_e7107_row8_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 25.8%, #dcdcff 25.8%, #dcdcff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #dcdcff;\n",
+ "}\n",
+ "#T_e7107_row8_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ffeeee 30.0%, #ffeeee 31.9%, transparent 31.9%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ffeeee;\n",
+ "}\n",
+ "#T_e7107_row9_col0 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff8282 30.0%, #ff8282 44.8%, transparent 44.8%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff8282;\n",
+ "}\n",
+ "#T_e7107_row9_col1 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 30.0%, #ff8484 30.0%, #ff8484 44.4%, transparent 44.4%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #ff8484;\n",
+ "}\n",
+ "#T_e7107_row9_col2 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 25.4%, #d8d8ff 25.4%, #d8d8ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #d8d8ff;\n",
+ "}\n",
+ "#T_e7107_row9_col3 {\n",
+ " width: 120px;\n",
+ " border-right: 1px solid black;\n",
+ " background: linear-gradient(90deg, transparent 26.4%, #e0e0ff 26.4%, #e0e0ff 30.0%, transparent 30.0%) no-repeat center;\n",
+ " background-size: 100% 50.0%;\n",
+ " color: #e0e0ff;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_e7107\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_e7107_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_e7107_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_e7107_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_e7107_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_e7107_row0_col0\" class=\"data row0 col0\" >1.764</td>\n",
+ " <td id=\"T_e7107_row0_col1\" class=\"data row0 col1\" >0.400</td>\n",
+ " <td id=\"T_e7107_row0_col2\" class=\"data row0 col2\" ></td>\n",
+ " <td id=\"T_e7107_row0_col3\" class=\"data row0 col3\" >2.241</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_e7107_row1_col0\" class=\"data row1 col0\" >1.868</td>\n",
+ " <td id=\"T_e7107_row1_col1\" class=\"data row1 col1\" >-0.977</td>\n",
+ " <td id=\"T_e7107_row1_col2\" class=\"data row1 col2\" >0.950</td>\n",
+ " <td id=\"T_e7107_row1_col3\" class=\"data row1 col3\" >-0.151</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_e7107_row2_col0\" class=\"data row2 col0\" >-0.103</td>\n",
+ " <td id=\"T_e7107_row2_col1\" class=\"data row2 col1\" >0.411</td>\n",
+ " <td id=\"T_e7107_row2_col2\" class=\"data row2 col2\" >0.144</td>\n",
+ " <td id=\"T_e7107_row2_col3\" class=\"data row2 col3\" >1.454</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_e7107_row3_col0\" class=\"data row3 col0\" >0.761</td>\n",
+ " <td id=\"T_e7107_row3_col1\" class=\"data row3 col1\" >0.122</td>\n",
+ " <td id=\"T_e7107_row3_col2\" class=\"data row3 col2\" >0.444</td>\n",
+ " <td id=\"T_e7107_row3_col3\" class=\"data row3 col3\" >0.334</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_e7107_row4_col0\" class=\"data row4 col0\" >1.494</td>\n",
+ " <td id=\"T_e7107_row4_col1\" class=\"data row4 col1\" >-0.205</td>\n",
+ " <td id=\"T_e7107_row4_col2\" class=\"data row4 col2\" >0.313</td>\n",
+ " <td id=\"T_e7107_row4_col3\" class=\"data row4 col3\" ></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_e7107_row5_col0\" class=\"data row5 col0\" >-2.553</td>\n",
+ " <td id=\"T_e7107_row5_col1\" class=\"data row5 col1\" >0.654</td>\n",
+ " <td id=\"T_e7107_row5_col2\" class=\"data row5 col2\" >0.864</td>\n",
+ " <td id=\"T_e7107_row5_col3\" class=\"data row5 col3\" >-0.742</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_e7107_row6_col0\" class=\"data row6 col0\" >2.270</td>\n",
+ " <td id=\"T_e7107_row6_col1\" class=\"data row6 col1\" >-1.454</td>\n",
+ " <td id=\"T_e7107_row6_col2\" class=\"data row6 col2\" >0.046</td>\n",
+ " <td id=\"T_e7107_row6_col3\" class=\"data row6 col3\" >-0.187</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_e7107_row7_col0\" class=\"data row7 col0\" >1.533</td>\n",
+ " <td id=\"T_e7107_row7_col1\" class=\"data row7 col1\" >1.469</td>\n",
+ " <td id=\"T_e7107_row7_col2\" class=\"data row7 col2\" >0.155</td>\n",
+ " <td id=\"T_e7107_row7_col3\" class=\"data row7 col3\" >0.378</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_e7107_row8_col0\" class=\"data row8 col0\" >-0.888</td>\n",
+ " <td id=\"T_e7107_row8_col1\" class=\"data row8 col1\" >-1.981</td>\n",
+ " <td id=\"T_e7107_row8_col2\" class=\"data row8 col2\" >-0.348</td>\n",
+ " <td id=\"T_e7107_row8_col3\" class=\"data row8 col3\" >0.156</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_e7107_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_e7107_row9_col0\" class=\"data row9 col0\" >1.230</td>\n",
+ " <td id=\"T_e7107_row9_col1\" class=\"data row9 col1\" >1.202</td>\n",
+ " <td id=\"T_e7107_row9_col2\" class=\"data row9 col2\" >-0.387</td>\n",
+ " <td id=\"T_e7107_row9_col3\" class=\"data row9 col3\" >-0.302</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe605e40>"
+ ]
+ },
+ "execution_count": 57,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df2.style.format('{:.3f}', na_rep=\"\")\\\n",
+ " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n",
+ " width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n",
+ " .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The following example aims to give a highlight of the behavior of the new align options:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:22.961760Z",
+ "iopub.status.busy": "2023-11-30T07:57:22.961529Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.006282Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.005769Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "# Hide the construction of the display chart from the user\n",
+ "import pandas as pd\n",
+ "from IPython.display import HTML\n",
+ "\n",
+ "# Test series\n",
+ "test1 = pd.Series([-100,-60,-30,-20], name='All Negative')\n",
+ "test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')\n",
+ "test3 = pd.Series([10,20,50,100], name='All Positive')\n",
+ "test4 = pd.Series([100, 103, 101, 102], name='Large Positive')\n",
+ "\n",
+ "\n",
+ "head = \"\"\"\n",
+ "<table>\n",
+ " <thead>\n",
+ " <th>Align</th>\n",
+ " <th>All Negative</th>\n",
+ " <th>Both Neg and Pos</th>\n",
+ " <th>All Positive</th>\n",
+ " <th>Large Positive</th>\n",
+ " </thead>\n",
+ " </tbody>\n",
+ "\n",
+ "\"\"\"\n",
+ "\n",
+ "aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]\n",
+ "for align in aligns:\n",
+ " row = \"<tr><th>{}</th>\".format(align)\n",
+ " for series in [test1,test2,test3, test4]:\n",
+ " s = series.copy()\n",
+ " s.name=''\n",
+ " row += \"<td>{}</td>\".format(s.to_frame().style.hide(axis='index').bar(align=align, \n",
+ " color=['#d65f5f', '#5fba7d'], \n",
+ " width=100).to_html()) #testn['width']\n",
+ " row += '</tr>'\n",
+ " head += row\n",
+ " \n",
+ "head+= \"\"\"\n",
+ "</tbody>\n",
+ "</table>\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.008460Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.008231Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.012106Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.011578Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<table>\n",
+ " <thead>\n",
+ " <th>Align</th>\n",
+ " <th>All Negative</th>\n",
+ " <th>Both Neg and Pos</th>\n",
+ " <th>All Positive</th>\n",
+ " <th>Large Positive</th>\n",
+ " </thead>\n",
+ " </tbody>\n",
+ "\n",
+ "<tr><th>left</th><td><style type=\"text/css\">\n",
+ "#T_d3655_row0_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_d3655_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_d3655_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 87.5%, transparent 87.5%);\n",
+ "}\n",
+ "#T_d3655_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_d3655\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_d3655_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_d3655_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3655_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3655_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3655_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_d0b8c_row0_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_d0b8c_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 5.0%, transparent 5.0%);\n",
+ "}\n",
+ "#T_d0b8c_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 10.0%, transparent 10.0%);\n",
+ "}\n",
+ "#T_d0b8c_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_d0b8c\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_d0b8c_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_d0b8c_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d0b8c_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d0b8c_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d0b8c_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_9eddb_row0_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_9eddb_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 11.1%, transparent 11.1%);\n",
+ "}\n",
+ "#T_9eddb_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 44.4%, transparent 44.4%);\n",
+ "}\n",
+ "#T_9eddb_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_9eddb\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_9eddb_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_9eddb_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_9eddb_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_9eddb_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_9eddb_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_1b6c1_row0_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_1b6c1_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_1b6c1_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 33.3%, transparent 33.3%);\n",
+ "}\n",
+ "#T_1b6c1_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 66.7%, transparent 66.7%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_1b6c1\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_1b6c1_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_1b6c1_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_1b6c1_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_1b6c1_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_1b6c1_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr><tr><th>right</th><td><style type=\"text/css\">\n",
+ "#T_78156_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_78156_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #d65f5f 50.0%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_78156_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 87.5%, #d65f5f 87.5%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_78156_row3_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_78156\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_78156_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_78156_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_78156_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_78156_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_78156_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_f2565_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_f2565_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 5.0%, #d65f5f 5.0%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_f2565_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 10.0%, #5fba7d 10.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_f2565_row3_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_f2565\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_f2565_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_f2565_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f2565_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f2565_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f2565_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_d3846_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_d3846_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 11.1%, #5fba7d 11.1%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_d3846_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 44.4%, #5fba7d 44.4%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_d3846_row3_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_d3846\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_d3846_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_d3846_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3846_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3846_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_d3846_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_13543_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_13543_row1_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_13543_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 33.3%, #5fba7d 33.3%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_13543_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 66.7%, #5fba7d 66.7%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_13543\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_13543_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_13543_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_13543_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_13543_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_13543_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr><tr><th>zero</th><td><style type=\"text/css\">\n",
+ "#T_7c47d_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_7c47d_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 20.0%, #d65f5f 20.0%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_7c47d_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 35.0%, #d65f5f 35.0%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_7c47d_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 40.0%, #d65f5f 40.0%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_7c47d\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_7c47d_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_7c47d_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_7c47d_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_7c47d_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_7c47d_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_07049_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 44.4%, #d65f5f 44.4%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_07049_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 47.2%, #d65f5f 47.2%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_07049_row2_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_07049_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_07049\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_07049_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_07049_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_07049_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_07049_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_07049_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_0af49_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 55.0%, transparent 55.0%);\n",
+ "}\n",
+ "#T_0af49_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 60.0%, transparent 60.0%);\n",
+ "}\n",
+ "#T_0af49_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 75.0%, transparent 75.0%);\n",
+ "}\n",
+ "#T_0af49_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_0af49\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_0af49_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_0af49_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_0af49_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_0af49_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_0af49_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_a0ac6_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 98.5%, transparent 98.5%);\n",
+ "}\n",
+ "#T_a0ac6_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_a0ac6_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 99.0%, transparent 99.0%);\n",
+ "}\n",
+ "#T_a0ac6_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 99.5%, transparent 99.5%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_a0ac6\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_a0ac6_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_a0ac6_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_a0ac6_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_a0ac6_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_a0ac6_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr><tr><th>mid</th><td><style type=\"text/css\">\n",
+ "#T_68080_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_68080_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 40.0%, #d65f5f 40.0%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_68080_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 70.0%, #d65f5f 70.0%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_68080_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 80.0%, #d65f5f 80.0%, #d65f5f 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_68080\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_68080_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_68080_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_68080_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_68080_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_68080_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_c885b_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 10.0%, transparent 10.0%);\n",
+ "}\n",
+ "#T_c885b_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 5.0%, #d65f5f 5.0%, #d65f5f 10.0%, transparent 10.0%);\n",
+ "}\n",
+ "#T_c885b_row2_col0 {\n",
+ " width: 10em;\n",
+ "}\n",
+ "#T_c885b_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 10.0%, #5fba7d 10.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_c885b\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_c885b_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_c885b_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_c885b_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_c885b_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_c885b_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_f7ed7_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 10.0%, transparent 10.0%);\n",
+ "}\n",
+ "#T_f7ed7_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 20.0%, transparent 20.0%);\n",
+ "}\n",
+ "#T_f7ed7_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_f7ed7_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_f7ed7\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_f7ed7_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_f7ed7_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f7ed7_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f7ed7_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f7ed7_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_ab8eb_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 97.1%, transparent 97.1%);\n",
+ "}\n",
+ "#T_ab8eb_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_ab8eb_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 98.1%, transparent 98.1%);\n",
+ "}\n",
+ "#T_ab8eb_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #5fba7d 99.0%, transparent 99.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_ab8eb\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_ab8eb_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_ab8eb_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_ab8eb_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_ab8eb_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_ab8eb_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr><tr><th>mean</th><td><style type=\"text/css\">\n",
+ "#T_76801_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_76801_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 42.1%, #d65f5f 42.1%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_76801_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 73.7%, transparent 73.7%);\n",
+ "}\n",
+ "#T_76801_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 84.2%, transparent 84.2%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_76801\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_76801_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_76801_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_76801_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_76801_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_76801_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_4566b_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 29.8%, #d65f5f 29.8%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4566b_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 33.3%, #d65f5f 33.3%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4566b_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 36.8%, #d65f5f 36.8%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4566b_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_4566b\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_4566b_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_4566b_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4566b_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4566b_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4566b_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_f5650_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 18.2%, #d65f5f 18.2%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_f5650_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 27.3%, #d65f5f 27.3%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_f5650_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 54.5%, transparent 54.5%);\n",
+ "}\n",
+ "#T_f5650_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_f5650\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_f5650_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_f5650_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f5650_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f5650_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_f5650_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_e4bfb_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_e4bfb_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_e4bfb_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 33.3%, #d65f5f 33.3%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_e4bfb_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 66.7%, transparent 66.7%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_e4bfb\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_e4bfb_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_e4bfb_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_e4bfb_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_e4bfb_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_e4bfb_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr><tr><th>99</th><td><style type=\"text/css\">\n",
+ "#T_3f8cc_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_3f8cc_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 10.1%, #d65f5f 10.1%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_3f8cc_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 17.6%, #d65f5f 17.6%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_3f8cc_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 20.1%, #d65f5f 20.1%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_3f8cc\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_3f8cc_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_3f8cc_row0_col0\" class=\"data row0 col0\" >-100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_3f8cc_row1_col0\" class=\"data row1 col0\" >-60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_3f8cc_row2_col0\" class=\"data row2 col0\" >-30</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_3f8cc_row3_col0\" class=\"data row3 col0\" >-20</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_4a22f_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4a22f_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 2.3%, #d65f5f 2.3%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4a22f_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 4.6%, #d65f5f 4.6%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_4a22f_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 45.9%, #d65f5f 45.9%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_4a22f\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_4a22f_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_4a22f_row0_col0\" class=\"data row0 col0\" >-10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4a22f_row1_col0\" class=\"data row1 col0\" >-5</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4a22f_row2_col0\" class=\"data row2 col0\" >0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_4a22f_row3_col0\" class=\"data row3 col0\" >90</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_bddcd_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_bddcd_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 5.6%, #d65f5f 5.6%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_bddcd_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 22.5%, #d65f5f 22.5%, #d65f5f 50.0%, transparent 50.0%);\n",
+ "}\n",
+ "#T_bddcd_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 50.6%, transparent 50.6%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_bddcd\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_bddcd_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_bddcd_row0_col0\" class=\"data row0 col0\" >10</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_bddcd_row1_col0\" class=\"data row1 col0\" >20</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_bddcd_row2_col0\" class=\"data row2 col0\" >50</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_bddcd_row3_col0\" class=\"data row3 col0\" >100</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td><td><style type=\"text/css\">\n",
+ "#T_51da5_row0_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 62.5%, transparent 62.5%);\n",
+ "}\n",
+ "#T_51da5_row1_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 100.0%, transparent 100.0%);\n",
+ "}\n",
+ "#T_51da5_row2_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 75.0%, transparent 75.0%);\n",
+ "}\n",
+ "#T_51da5_row3_col0 {\n",
+ " width: 10em;\n",
+ " background: linear-gradient(90deg, transparent 50.0%, #5fba7d 50.0%, #5fba7d 87.5%, transparent 87.5%);\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_51da5\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_51da5_level0_col0\" class=\"col_heading level0 col0\" ></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_51da5_row0_col0\" class=\"data row0 col0\" >100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_51da5_row1_col0\" class=\"data row1 col0\" >103</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_51da5_row2_col0\" class=\"data row2 col0\" >101</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_51da5_row3_col0\" class=\"data row3 col0\" >102</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</td></tr>\n",
+ "</tbody>\n",
+ "</table>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 59,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "HTML(head)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Sharing styles"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Say you have a lovely style built up for a DataFrame, and now you want to apply the same style to a second DataFrame. Export the style with `df1.style.export`, and import it on the second DataFrame with `df2.style.use`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 60,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.014092Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.013925Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.020838Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.020282Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_b3683 th {\n",
+ " color: blue;\n",
+ "}\n",
+ "#T_b3683_row1_col1, #T_b3683_row5_col0, #T_b3683_row5_col3, #T_b3683_row6_col1, #T_b3683_row8_col0, #T_b3683_row8_col1, #T_b3683_row8_col2, #T_b3683_row9_col2, #T_b3683_row9_col3 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_b3683_row1_col3, #T_b3683_row2_col0, #T_b3683_row4_col1, #T_b3683_row6_col3 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "#T_b3683_row2_col2, #T_b3683_row3_col1, #T_b3683_row6_col2, #T_b3683_row7_col2, #T_b3683_row8_col3 {\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_b3683\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_b3683_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n",
+ " <th id=\"T_b3683_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n",
+ " <th id=\"T_b3683_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n",
+ " <th id=\"T_b3683_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n",
+ " <td id=\"T_b3683_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n",
+ " <td id=\"T_b3683_row0_col2\" class=\"data row0 col2\" >nan</td>\n",
+ " <td id=\"T_b3683_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n",
+ " <td id=\"T_b3683_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n",
+ " <td id=\"T_b3683_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n",
+ " <td id=\"T_b3683_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n",
+ " <td id=\"T_b3683_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n",
+ " <td id=\"T_b3683_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n",
+ " <td id=\"T_b3683_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n",
+ " <td id=\"T_b3683_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n",
+ " <td id=\"T_b3683_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n",
+ " <td id=\"T_b3683_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n",
+ " <td id=\"T_b3683_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n",
+ " <td id=\"T_b3683_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n",
+ " <td id=\"T_b3683_row4_col3\" class=\"data row4 col3\" >nan</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n",
+ " <td id=\"T_b3683_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n",
+ " <td id=\"T_b3683_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n",
+ " <td id=\"T_b3683_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n",
+ " <td id=\"T_b3683_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n",
+ " <td id=\"T_b3683_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n",
+ " <td id=\"T_b3683_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n",
+ " <td id=\"T_b3683_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n",
+ " <td id=\"T_b3683_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n",
+ " <td id=\"T_b3683_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n",
+ " <td id=\"T_b3683_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n",
+ " <td id=\"T_b3683_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n",
+ " <td id=\"T_b3683_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_b3683_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n",
+ " <td id=\"T_b3683_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n",
+ " <td id=\"T_b3683_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n",
+ " <td id=\"T_b3683_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe607e20>"
+ ]
+ },
+ "execution_count": 60,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "style1 = df2.style\\\n",
+ " .map(style_negative, props='color:red;')\\\n",
+ " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\\\n",
+ " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\\\n",
+ " .hide(axis=\"index\")\n",
+ "style1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 61,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.023029Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.022804Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.029151Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.028723Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_2d8cf th {\n",
+ " color: blue;\n",
+ "}\n",
+ "#T_2d8cf_row0_col0, #T_2d8cf_row0_col1, #T_2d8cf_row0_col2, #T_2d8cf_row1_col0, #T_2d8cf_row1_col1, #T_2d8cf_row1_col2, #T_2d8cf_row2_col0, #T_2d8cf_row2_col2, #T_2d8cf_row3_col0, #T_2d8cf_row3_col1 {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_2d8cf_row2_col1, #T_2d8cf_row3_col2 {\n",
+ " color: red;\n",
+ " opacity: 20%;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_2d8cf\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th id=\"T_2d8cf_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_2d8cf_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " <th id=\"T_2d8cf_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ " <th id=\"T_2d8cf_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td id=\"T_2d8cf_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ " <td id=\"T_2d8cf_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ " <td id=\"T_2d8cf_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ " <td id=\"T_2d8cf_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_2d8cf_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ " <td id=\"T_2d8cf_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ " <td id=\"T_2d8cf_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ " <td id=\"T_2d8cf_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_2d8cf_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ " <td id=\"T_2d8cf_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ " <td id=\"T_2d8cf_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ " <td id=\"T_2d8cf_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td id=\"T_2d8cf_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ " <td id=\"T_2d8cf_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ " <td id=\"T_2d8cf_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ " <td id=\"T_2d8cf_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe606a10>"
+ ]
+ },
+ "execution_count": 61,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "style2 = df3.style\n",
+ "style2.use(style1.export())\n",
+ "style2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been `use`d upon."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Limitations\n",
+ "\n",
+ "- DataFrame only (use `Series.to_frame().style`)\n",
+ "- The index and columns do not need to be unique, but certain styling functions can only work with unique indexes.\n",
+ "- No large repr, and construction performance isn't great; although we have some [HTML optimizations](#Optimization)\n",
+ "- You can only apply styles, you can't insert new HTML entities, except via subclassing."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Other Fun and Useful Stuff\n",
+ "\n",
+ "Here are a few interesting examples."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Widgets\n",
+ "\n",
+ "`Styler` interacts pretty well with widgets. If you're viewing this online instead of running the notebook yourself, you're missing out on interactively adjusting the color palette."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 62,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.031236Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.031066Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.059732Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.059004Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "af0b0fee7f814793b690e5e2c4cd730b",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "interactive(children=(IntSlider(value=179, description='h_neg', max=359), IntSlider(value=179, description='h_…"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from ipywidgets import widgets\n",
+ "@widgets.interact\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):\n",
+ " return df2.style.background_gradient(\n",
+ " cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,\n",
+ " as_cmap=True)\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Magnify"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.063147Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.062982Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.066124Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.065644Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def magnify():\n",
+ " return [dict(selector=\"th\",\n",
+ " props=[(\"font-size\", \"4pt\")]),\n",
+ " dict(selector=\"td\",\n",
+ " props=[('padding', \"0em 0em\")]),\n",
+ " dict(selector=\"th:hover\",\n",
+ " props=[(\"font-size\", \"12pt\")]),\n",
+ " dict(selector=\"tr:hover td:hover\",\n",
+ " props=[('max-width', '200px'),\n",
+ " ('font-size', '12pt')])\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.068089Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.067891Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.105803Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.105292Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_cfd1f th {\n",
+ " font-size: 4pt;\n",
+ "}\n",
+ "#T_cfd1f td {\n",
+ " padding: 0em 0em;\n",
+ "}\n",
+ "#T_cfd1f th:hover {\n",
+ " font-size: 12pt;\n",
+ "}\n",
+ "#T_cfd1f tr:hover td:hover {\n",
+ " max-width: 200px;\n",
+ " font-size: 12pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col0, #T_cfd1f_row1_col24, #T_cfd1f_row8_col18 {\n",
+ " background-color: #eaecf0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col1, #T_cfd1f_row13_col22 {\n",
+ " background-color: #b6c9e0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col2, #T_cfd1f_row1_col17, #T_cfd1f_row10_col12 {\n",
+ " background-color: #e9b1bc;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col3, #T_cfd1f_row1_col10, #T_cfd1f_row6_col6, #T_cfd1f_row13_col8 {\n",
+ " background-color: #ebc2ca;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col4, #T_cfd1f_row13_col19, #T_cfd1f_row19_col16 {\n",
+ " background-color: #e8aab6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col5, #T_cfd1f_row1_col19 {\n",
+ " background-color: #efdcdf;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col6, #T_cfd1f_row3_col8, #T_cfd1f_row6_col8 {\n",
+ " background-color: #ebc1c9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col7 {\n",
+ " background-color: #82a4cf;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col8, #T_cfd1f_row9_col21 {\n",
+ " background-color: #dd5f78;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col9, #T_cfd1f_row2_col24 {\n",
+ " background-color: #c1d0e3;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col10, #T_cfd1f_row3_col15, #T_cfd1f_row6_col16, #T_cfd1f_row14_col15, #T_cfd1f_row18_col16 {\n",
+ " background-color: #e8adb8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col11, #T_cfd1f_row5_col17, #T_cfd1f_row10_col3, #T_cfd1f_row12_col18 {\n",
+ " background-color: #efdade;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col12, #T_cfd1f_row9_col24, #T_cfd1f_row10_col1 {\n",
+ " background-color: #6e96c8;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col13 {\n",
+ " background-color: #e48fa0;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col14, #T_cfd1f_row3_col12, #T_cfd1f_row9_col3, #T_cfd1f_row9_col10, #T_cfd1f_row9_col13, #T_cfd1f_row13_col13 {\n",
+ " background-color: #f2eff0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col15, #T_cfd1f_row8_col17, #T_cfd1f_row10_col2 {\n",
+ " background-color: #e595a5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col16 {\n",
+ " background-color: #abc1dc;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col17, #T_cfd1f_row6_col14, #T_cfd1f_row12_col15, #T_cfd1f_row13_col17 {\n",
+ " background-color: #e7a3b0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col18, #T_cfd1f_row3_col20, #T_cfd1f_row17_col20 {\n",
+ " background-color: #b5c8df;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col19, #T_cfd1f_row4_col3, #T_cfd1f_row5_col12, #T_cfd1f_row13_col3 {\n",
+ " background-color: #edcdd3;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col20, #T_cfd1f_row9_col1, #T_cfd1f_row16_col1 {\n",
+ " background-color: #648fc5;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col21, #T_cfd1f_row1_col21, #T_cfd1f_row2_col21, #T_cfd1f_row3_col21, #T_cfd1f_row4_col21, #T_cfd1f_row5_col21, #T_cfd1f_row6_col21, #T_cfd1f_row7_col21, #T_cfd1f_row8_col16, #T_cfd1f_row9_col16, #T_cfd1f_row10_col16, #T_cfd1f_row10_col21, #T_cfd1f_row11_col16, #T_cfd1f_row12_col21, #T_cfd1f_row13_col21, #T_cfd1f_row14_col21, #T_cfd1f_row15_col21, #T_cfd1f_row16_col21, #T_cfd1f_row17_col21, #T_cfd1f_row18_col21, #T_cfd1f_row19_col21 {\n",
+ " background-color: #d73c5b;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col22, #T_cfd1f_row1_col7, #T_cfd1f_row2_col7, #T_cfd1f_row3_col4, #T_cfd1f_row3_col7, #T_cfd1f_row4_col4, #T_cfd1f_row5_col4, #T_cfd1f_row6_col24, #T_cfd1f_row7_col24, #T_cfd1f_row8_col4, #T_cfd1f_row9_col4, #T_cfd1f_row10_col4, #T_cfd1f_row11_col4, #T_cfd1f_row12_col7, #T_cfd1f_row13_col7, #T_cfd1f_row14_col24, #T_cfd1f_row15_col23, #T_cfd1f_row16_col23, #T_cfd1f_row17_col23, #T_cfd1f_row18_col7, #T_cfd1f_row19_col23 {\n",
+ " background-color: #4479bb;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col23 {\n",
+ " background-color: #cdd9e7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row0_col24 {\n",
+ " background-color: #e18093;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col0 {\n",
+ " background-color: #e0798d;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col1 {\n",
+ " background-color: #adc2dd;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col2, #T_cfd1f_row8_col5, #T_cfd1f_row11_col19 {\n",
+ " background-color: #e69ead;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col3, #T_cfd1f_row1_col13, #T_cfd1f_row18_col8 {\n",
+ " background-color: #e6a0ae;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col4, #T_cfd1f_row5_col18, #T_cfd1f_row6_col10, #T_cfd1f_row10_col11, #T_cfd1f_row11_col10, #T_cfd1f_row17_col10 {\n",
+ " background-color: #c9d6e6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col5 {\n",
+ " background-color: #f0e0e3;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col6 {\n",
+ " background-color: #da506b;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col8 {\n",
+ " background-color: #e17e92;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col9, #T_cfd1f_row5_col10, #T_cfd1f_row14_col11, #T_cfd1f_row16_col6 {\n",
+ " background-color: #bccce1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col11, #T_cfd1f_row15_col14 {\n",
+ " background-color: #f0dee2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col12, #T_cfd1f_row12_col10, #T_cfd1f_row16_col10 {\n",
+ " background-color: #aec3dd;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col14, #T_cfd1f_row9_col14 {\n",
+ " background-color: #e07389;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col15, #T_cfd1f_row12_col19 {\n",
+ " background-color: #e69dab;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col16 {\n",
+ " background-color: #eab8c2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col18, #T_cfd1f_row6_col9, #T_cfd1f_row7_col3, #T_cfd1f_row11_col8 {\n",
+ " background-color: #eff0f2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col20, #T_cfd1f_row2_col20, #T_cfd1f_row7_col22 {\n",
+ " background-color: #9fb8d8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col22, #T_cfd1f_row14_col1, #T_cfd1f_row17_col22, #T_cfd1f_row19_col0 {\n",
+ " background-color: #85a6d0;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row1_col23, #T_cfd1f_row19_col9 {\n",
+ " background-color: #d7dfea;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col0, #T_cfd1f_row7_col0, #T_cfd1f_row7_col15, #T_cfd1f_row8_col13 {\n",
+ " background-color: #f2eeef;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col1, #T_cfd1f_row4_col1 {\n",
+ " background-color: #5887c1;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col2, #T_cfd1f_row14_col5, #T_cfd1f_row18_col5, #T_cfd1f_row19_col2, #T_cfd1f_row19_col3, #T_cfd1f_row19_col5 {\n",
+ " background-color: #ebbfc8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col3, #T_cfd1f_row3_col3 {\n",
+ " background-color: #c5d3e4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col4, #T_cfd1f_row10_col23 {\n",
+ " background-color: #81a3ce;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col5, #T_cfd1f_row7_col13, #T_cfd1f_row9_col9, #T_cfd1f_row16_col17 {\n",
+ " background-color: #e8ebf0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col6, #T_cfd1f_row2_col14, #T_cfd1f_row13_col14 {\n",
+ " background-color: #e38a9c;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col8, #T_cfd1f_row3_col2 {\n",
+ " background-color: #eabbc4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col9 {\n",
+ " background-color: #769ccb;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col10, #T_cfd1f_row3_col10, #T_cfd1f_row12_col6, #T_cfd1f_row17_col6 {\n",
+ " background-color: #d1dbe8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col11, #T_cfd1f_row2_col16, #T_cfd1f_row2_col17, #T_cfd1f_row5_col13, #T_cfd1f_row6_col0, #T_cfd1f_row13_col18 {\n",
+ " background-color: #e3e8ee;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col12, #T_cfd1f_row5_col11 {\n",
+ " background-color: #dee4ec;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col13, #T_cfd1f_row5_col0, #T_cfd1f_row5_col15, #T_cfd1f_row17_col18, #T_cfd1f_row19_col13 {\n",
+ " background-color: #f1eaeb;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col15, #T_cfd1f_row5_col9, #T_cfd1f_row8_col3, #T_cfd1f_row10_col10, #T_cfd1f_row17_col17 {\n",
+ " background-color: #f1e7e9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col18, #T_cfd1f_row9_col20 {\n",
+ " background-color: #b8c9e0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col19, #T_cfd1f_row4_col8 {\n",
+ " background-color: #f2eded;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col22, #T_cfd1f_row14_col23 {\n",
+ " background-color: #608dc4;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row2_col23, #T_cfd1f_row5_col22, #T_cfd1f_row13_col1, #T_cfd1f_row16_col22 {\n",
+ " background-color: #8dacd2;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col0, #T_cfd1f_row8_col9, #T_cfd1f_row9_col8, #T_cfd1f_row11_col18 {\n",
+ " background-color: #eed4d9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col1, #T_cfd1f_row15_col4, #T_cfd1f_row15_col24, #T_cfd1f_row16_col7, #T_cfd1f_row18_col4 {\n",
+ " background-color: #5383bf;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col5, #T_cfd1f_row11_col9 {\n",
+ " background-color: #e4e9ee;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col6 {\n",
+ " background-color: #e28194;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col9 {\n",
+ " background-color: #b0c4dd;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col11, #T_cfd1f_row4_col9 {\n",
+ " background-color: #c0cfe3;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col13, #T_cfd1f_row15_col9 {\n",
+ " background-color: #ccd8e7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col14 {\n",
+ " background-color: #e3889a;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col16, #T_cfd1f_row15_col19, #T_cfd1f_row16_col2 {\n",
+ " background-color: #dfe5ed;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col17, #T_cfd1f_row11_col2, #T_cfd1f_row14_col18 {\n",
+ " background-color: #eed6db;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col18, #T_cfd1f_row8_col23 {\n",
+ " background-color: #a6bdda;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col19, #T_cfd1f_row4_col17, #T_cfd1f_row7_col10, #T_cfd1f_row8_col10 {\n",
+ " background-color: #edccd2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col22, #T_cfd1f_row17_col24 {\n",
+ " background-color: #7299c9;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col23 {\n",
+ " background-color: #8cabd2;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row3_col24 {\n",
+ " background-color: #cbd7e6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col0, #T_cfd1f_row8_col2, #T_cfd1f_row9_col17 {\n",
+ " background-color: #e59aa9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col2, #T_cfd1f_row7_col6, #T_cfd1f_row15_col15, #T_cfd1f_row18_col2, #T_cfd1f_row18_col19 {\n",
+ " background-color: #ecc9d0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col5, #T_cfd1f_row6_col5, #T_cfd1f_row6_col15, #T_cfd1f_row10_col6 {\n",
+ " background-color: #f0e4e6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col6 {\n",
+ " background-color: #e38b9d;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col7, #T_cfd1f_row11_col23, #T_cfd1f_row17_col4, #T_cfd1f_row17_col7 {\n",
+ " background-color: #5182bf;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col10, #T_cfd1f_row7_col11, #T_cfd1f_row14_col9 {\n",
+ " background-color: #e6eaef;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col11, #T_cfd1f_row16_col19 {\n",
+ " background-color: #c7d4e5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col12, #T_cfd1f_row7_col12, #T_cfd1f_row10_col13 {\n",
+ " background-color: #e7eaef;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col13, #T_cfd1f_row10_col9, #T_cfd1f_row10_col18, #T_cfd1f_row17_col2 {\n",
+ " background-color: #f0e1e4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col14 {\n",
+ " background-color: #e17c90;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col15, #T_cfd1f_row19_col8 {\n",
+ " background-color: #e7a8b5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col16, #T_cfd1f_row5_col5, #T_cfd1f_row6_col12, #T_cfd1f_row10_col20, #T_cfd1f_row14_col6, #T_cfd1f_row18_col14 {\n",
+ " background-color: #f1e5e7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col18, #T_cfd1f_row9_col22 {\n",
+ " background-color: #b2c6de;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col19, #T_cfd1f_row12_col8, #T_cfd1f_row12_col17, #T_cfd1f_row13_col6, #T_cfd1f_row16_col16, #T_cfd1f_row17_col8 {\n",
+ " background-color: #edcad1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col20, #T_cfd1f_row9_col23, #T_cfd1f_row12_col11 {\n",
+ " background-color: #9eb8d8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col22 {\n",
+ " background-color: #7b9fcd;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col23, #T_cfd1f_row6_col23, #T_cfd1f_row14_col12 {\n",
+ " background-color: #94b0d4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row4_col24, #T_cfd1f_row17_col13 {\n",
+ " background-color: #a1b9d9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col1 {\n",
+ " background-color: #638ec5;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col2, #T_cfd1f_row6_col17, #T_cfd1f_row18_col17, #T_cfd1f_row19_col17 {\n",
+ " background-color: #edd0d6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col3, #T_cfd1f_row14_col8, #T_cfd1f_row15_col17, #T_cfd1f_row17_col16 {\n",
+ " background-color: #ecc6ce;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col6 {\n",
+ " background-color: #e28497;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col7, #T_cfd1f_row18_col1 {\n",
+ " background-color: #6d95c8;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col8, #T_cfd1f_row13_col20, #T_cfd1f_row14_col3, #T_cfd1f_row14_col20, #T_cfd1f_row15_col8 {\n",
+ " background-color: #f0e3e5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col14 {\n",
+ " background-color: #e17a8f;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col16, #T_cfd1f_row8_col6, #T_cfd1f_row9_col5, #T_cfd1f_row9_col15, #T_cfd1f_row12_col2 {\n",
+ " background-color: #ecc5cc;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col19, #T_cfd1f_row14_col19, #T_cfd1f_row18_col18 {\n",
+ " background-color: #eed2d7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col20, #T_cfd1f_row11_col22, #T_cfd1f_row19_col20 {\n",
+ " background-color: #b4c7df;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col23, #T_cfd1f_row6_col20 {\n",
+ " background-color: #a2bad9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row5_col24 {\n",
+ " background-color: #7198c9;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col1 {\n",
+ " background-color: #4f80be;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col2, #T_cfd1f_row15_col5, #T_cfd1f_row17_col3, #T_cfd1f_row19_col14 {\n",
+ " background-color: #efdde0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col3, #T_cfd1f_row13_col12, #T_cfd1f_row17_col15 {\n",
+ " background-color: #edeef1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col4, #T_cfd1f_row9_col7, #T_cfd1f_row18_col0 {\n",
+ " background-color: #6a93c7;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col7, #T_cfd1f_row7_col1, #T_cfd1f_row10_col7 {\n",
+ " background-color: #759bca;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col11 {\n",
+ " background-color: #c3d1e4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col13, #T_cfd1f_row10_col0, #T_cfd1f_row18_col9 {\n",
+ " background-color: #c4d2e4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col18, #T_cfd1f_row11_col6 {\n",
+ " background-color: #d5deea;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col19 {\n",
+ " background-color: #e69caa;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row6_col22 {\n",
+ " background-color: #7fa2ce;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col2, #T_cfd1f_row10_col8 {\n",
+ " background-color: #ebbec6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col4, #T_cfd1f_row19_col7 {\n",
+ " background-color: #5585c0;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col5 {\n",
+ " background-color: #e9b5bf;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col7, #T_cfd1f_row17_col0 {\n",
+ " background-color: #628ec4;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col8 {\n",
+ " background-color: #e28396;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col9, #T_cfd1f_row13_col15 {\n",
+ " background-color: #e7a7b4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col14, #T_cfd1f_row12_col16 {\n",
+ " background-color: #dc5c76;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col16 {\n",
+ " background-color: #db5771;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col17, #T_cfd1f_row9_col19 {\n",
+ " background-color: #e492a3;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col18, #T_cfd1f_row9_col18, #T_cfd1f_row14_col13, #T_cfd1f_row18_col11 {\n",
+ " background-color: #d8e0eb;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col19, #T_cfd1f_row12_col14 {\n",
+ " background-color: #e491a1;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col20, #T_cfd1f_row9_col0, #T_cfd1f_row15_col6, #T_cfd1f_row16_col18 {\n",
+ " background-color: #dae1eb;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row7_col23, #T_cfd1f_row16_col9 {\n",
+ " background-color: #95b1d5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col0 {\n",
+ " background-color: #d0dae8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col1 {\n",
+ " background-color: #487cbc;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col7 {\n",
+ " background-color: #4d7fbe;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col8 {\n",
+ " background-color: #e494a4;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col11, #T_cfd1f_row19_col10 {\n",
+ " background-color: #d3dce9;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col12 {\n",
+ " background-color: #e9b4be;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col14 {\n",
+ " background-color: #d94a67;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col15, #T_cfd1f_row11_col3, #T_cfd1f_row16_col15, #T_cfd1f_row18_col6 {\n",
+ " background-color: #f2ebec;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col19, #T_cfd1f_row11_col14, #T_cfd1f_row14_col16 {\n",
+ " background-color: #e0748a;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col20, #T_cfd1f_row13_col10, #T_cfd1f_row15_col10 {\n",
+ " background-color: #dbe2eb;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col21 {\n",
+ " background-color: #da516c;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col22, #T_cfd1f_row14_col0 {\n",
+ " background-color: #90aed3;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row8_col24, #T_cfd1f_row12_col24, #T_cfd1f_row13_col4 {\n",
+ " background-color: #5c8ac2;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row9_col2, #T_cfd1f_row11_col15 {\n",
+ " background-color: #e599a8;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row9_col6, #T_cfd1f_row16_col14, #T_cfd1f_row17_col14, #T_cfd1f_row18_col10 {\n",
+ " background-color: #ebedf1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row9_col11, #T_cfd1f_row12_col9, #T_cfd1f_row15_col20 {\n",
+ " background-color: #dce3ec;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row9_col12, #T_cfd1f_row13_col5, #T_cfd1f_row14_col2 {\n",
+ " background-color: #e9b2bd;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col5, #T_cfd1f_row11_col17 {\n",
+ " background-color: #eab7c1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col14 {\n",
+ " background-color: #dd657d;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col15 {\n",
+ " background-color: #e6a1af;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col17 {\n",
+ " background-color: #df7086;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col19 {\n",
+ " background-color: #e28698;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col22, #T_cfd1f_row18_col13 {\n",
+ " background-color: #cfdae7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row10_col24, #T_cfd1f_row13_col23 {\n",
+ " background-color: #6f97c9;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col0, #T_cfd1f_row11_col13 {\n",
+ " background-color: #a7bedb;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col1 {\n",
+ " background-color: #6791c6;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col5 {\n",
+ " background-color: #e2e7ee;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col7 {\n",
+ " background-color: #477bbb;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col11 {\n",
+ " background-color: #a5bcda;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col12, #T_cfd1f_row12_col12, #T_cfd1f_row13_col9 {\n",
+ " background-color: #f1e8ea;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col20, #T_cfd1f_row14_col10, #T_cfd1f_row15_col18, #T_cfd1f_row19_col15, #T_cfd1f_row19_col19 {\n",
+ " background-color: #f2f1f1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col21 {\n",
+ " background-color: #d8415f;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row11_col24, #T_cfd1f_row16_col4 {\n",
+ " background-color: #457abb;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col0, #T_cfd1f_row16_col13 {\n",
+ " background-color: #91afd4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col1 {\n",
+ " background-color: #7a9fcc;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col3, #T_cfd1f_row16_col8, #T_cfd1f_row19_col18 {\n",
+ " background-color: #efd7dc;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col4, #T_cfd1f_row18_col23 {\n",
+ " background-color: #5484c0;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col5, #T_cfd1f_row15_col2 {\n",
+ " background-color: #edcfd5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col13 {\n",
+ " background-color: #c8d5e5;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col20, #T_cfd1f_row15_col3, #T_cfd1f_row17_col5, #T_cfd1f_row19_col6 {\n",
+ " background-color: #f1f1f2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col22 {\n",
+ " background-color: #aac0dc;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row12_col23, #T_cfd1f_row19_col4 {\n",
+ " background-color: #497dbc;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row13_col0, #T_cfd1f_row17_col11, #T_cfd1f_row18_col20 {\n",
+ " background-color: #bacbe1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row13_col2 {\n",
+ " background-color: #e7a5b2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row13_col11 {\n",
+ " background-color: #b9cae0;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row13_col16 {\n",
+ " background-color: #db546f;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row13_col24, #T_cfd1f_row14_col4, #T_cfd1f_row15_col1 {\n",
+ " background-color: #5e8bc3;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row14_col7 {\n",
+ " background-color: #6b94c7;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row14_col14, #T_cfd1f_row15_col16 {\n",
+ " background-color: #e8abb7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row14_col17 {\n",
+ " background-color: #e7a4b1;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row14_col22 {\n",
+ " background-color: #99b4d6;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row15_col0, #T_cfd1f_row15_col7, #T_cfd1f_row19_col12 {\n",
+ " background-color: #5786c1;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row15_col11 {\n",
+ " background-color: #9bb6d7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row15_col12, #T_cfd1f_row17_col1 {\n",
+ " background-color: #739aca;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row15_col13, #T_cfd1f_row16_col11 {\n",
+ " background-color: #b1c5de;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row15_col22 {\n",
+ " background-color: #92b0d4;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col0 {\n",
+ " background-color: #6892c6;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col3, #T_cfd1f_row17_col19 {\n",
+ " background-color: #eeeff2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col5 {\n",
+ " background-color: #e0e6ed;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col12 {\n",
+ " background-color: #4b7ebd;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col20, #T_cfd1f_row19_col11 {\n",
+ " background-color: #bdcde2;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row16_col24 {\n",
+ " background-color: #5a88c2;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row17_col9 {\n",
+ " background-color: #84a5cf;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row17_col12 {\n",
+ " background-color: #5f8cc3;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row18_col3 {\n",
+ " background-color: #e8aeba;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row18_col12 {\n",
+ " background-color: #4c7ebd;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row18_col15 {\n",
+ " background-color: #efd9dd;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row18_col22, #T_cfd1f_row19_col22 {\n",
+ " background-color: #9db7d7;\n",
+ " color: #000000;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row18_col24 {\n",
+ " background-color: #89a9d1;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row19_col1 {\n",
+ " background-color: #5b89c2;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "#T_cfd1f_row19_col24 {\n",
+ " background-color: #88a8d0;\n",
+ " color: #f1f1f1;\n",
+ " max-width: 80px;\n",
+ " font-size: 1pt;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_cfd1f\">\n",
+ " <caption>Hover to magnify</caption>\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_cfd1f_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_cfd1f_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_cfd1f_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " <th id=\"T_cfd1f_level0_col3\" class=\"col_heading level0 col3\" >3</th>\n",
+ " <th id=\"T_cfd1f_level0_col4\" class=\"col_heading level0 col4\" >4</th>\n",
+ " <th id=\"T_cfd1f_level0_col5\" class=\"col_heading level0 col5\" >5</th>\n",
+ " <th id=\"T_cfd1f_level0_col6\" class=\"col_heading level0 col6\" >6</th>\n",
+ " <th id=\"T_cfd1f_level0_col7\" class=\"col_heading level0 col7\" >7</th>\n",
+ " <th id=\"T_cfd1f_level0_col8\" class=\"col_heading level0 col8\" >8</th>\n",
+ " <th id=\"T_cfd1f_level0_col9\" class=\"col_heading level0 col9\" >9</th>\n",
+ " <th id=\"T_cfd1f_level0_col10\" class=\"col_heading level0 col10\" >10</th>\n",
+ " <th id=\"T_cfd1f_level0_col11\" class=\"col_heading level0 col11\" >11</th>\n",
+ " <th id=\"T_cfd1f_level0_col12\" class=\"col_heading level0 col12\" >12</th>\n",
+ " <th id=\"T_cfd1f_level0_col13\" class=\"col_heading level0 col13\" >13</th>\n",
+ " <th id=\"T_cfd1f_level0_col14\" class=\"col_heading level0 col14\" >14</th>\n",
+ " <th id=\"T_cfd1f_level0_col15\" class=\"col_heading level0 col15\" >15</th>\n",
+ " <th id=\"T_cfd1f_level0_col16\" class=\"col_heading level0 col16\" >16</th>\n",
+ " <th id=\"T_cfd1f_level0_col17\" class=\"col_heading level0 col17\" >17</th>\n",
+ " <th id=\"T_cfd1f_level0_col18\" class=\"col_heading level0 col18\" >18</th>\n",
+ " <th id=\"T_cfd1f_level0_col19\" class=\"col_heading level0 col19\" >19</th>\n",
+ " <th id=\"T_cfd1f_level0_col20\" class=\"col_heading level0 col20\" >20</th>\n",
+ " <th id=\"T_cfd1f_level0_col21\" class=\"col_heading level0 col21\" >21</th>\n",
+ " <th id=\"T_cfd1f_level0_col22\" class=\"col_heading level0 col22\" >22</th>\n",
+ " <th id=\"T_cfd1f_level0_col23\" class=\"col_heading level0 col23\" >23</th>\n",
+ " <th id=\"T_cfd1f_level0_col24\" class=\"col_heading level0 col24\" >24</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_cfd1f_row0_col0\" class=\"data row0 col0\" >0.23</td>\n",
+ " <td id=\"T_cfd1f_row0_col1\" class=\"data row0 col1\" >1.03</td>\n",
+ " <td id=\"T_cfd1f_row0_col2\" class=\"data row0 col2\" >-0.84</td>\n",
+ " <td id=\"T_cfd1f_row0_col3\" class=\"data row0 col3\" >-0.59</td>\n",
+ " <td id=\"T_cfd1f_row0_col4\" class=\"data row0 col4\" >-0.96</td>\n",
+ " <td id=\"T_cfd1f_row0_col5\" class=\"data row0 col5\" >-0.22</td>\n",
+ " <td id=\"T_cfd1f_row0_col6\" class=\"data row0 col6\" >-0.62</td>\n",
+ " <td id=\"T_cfd1f_row0_col7\" class=\"data row0 col7\" >1.84</td>\n",
+ " <td id=\"T_cfd1f_row0_col8\" class=\"data row0 col8\" >-2.05</td>\n",
+ " <td id=\"T_cfd1f_row0_col9\" class=\"data row0 col9\" >0.87</td>\n",
+ " <td id=\"T_cfd1f_row0_col10\" class=\"data row0 col10\" >-0.92</td>\n",
+ " <td id=\"T_cfd1f_row0_col11\" class=\"data row0 col11\" >-0.23</td>\n",
+ " <td id=\"T_cfd1f_row0_col12\" class=\"data row0 col12\" >2.15</td>\n",
+ " <td id=\"T_cfd1f_row0_col13\" class=\"data row0 col13\" >-1.33</td>\n",
+ " <td id=\"T_cfd1f_row0_col14\" class=\"data row0 col14\" >0.08</td>\n",
+ " <td id=\"T_cfd1f_row0_col15\" class=\"data row0 col15\" >-1.25</td>\n",
+ " <td id=\"T_cfd1f_row0_col16\" class=\"data row0 col16\" >1.20</td>\n",
+ " <td id=\"T_cfd1f_row0_col17\" class=\"data row0 col17\" >-1.05</td>\n",
+ " <td id=\"T_cfd1f_row0_col18\" class=\"data row0 col18\" >1.06</td>\n",
+ " <td id=\"T_cfd1f_row0_col19\" class=\"data row0 col19\" >-0.42</td>\n",
+ " <td id=\"T_cfd1f_row0_col20\" class=\"data row0 col20\" >2.29</td>\n",
+ " <td id=\"T_cfd1f_row0_col21\" class=\"data row0 col21\" >-2.59</td>\n",
+ " <td id=\"T_cfd1f_row0_col22\" class=\"data row0 col22\" >2.82</td>\n",
+ " <td id=\"T_cfd1f_row0_col23\" class=\"data row0 col23\" >0.68</td>\n",
+ " <td id=\"T_cfd1f_row0_col24\" class=\"data row0 col24\" >-1.58</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_cfd1f_row1_col0\" class=\"data row1 col0\" >-1.75</td>\n",
+ " <td id=\"T_cfd1f_row1_col1\" class=\"data row1 col1\" >1.56</td>\n",
+ " <td id=\"T_cfd1f_row1_col2\" class=\"data row1 col2\" >-1.13</td>\n",
+ " <td id=\"T_cfd1f_row1_col3\" class=\"data row1 col3\" >-1.10</td>\n",
+ " <td id=\"T_cfd1f_row1_col4\" class=\"data row1 col4\" >1.03</td>\n",
+ " <td id=\"T_cfd1f_row1_col5\" class=\"data row1 col5\" >0.00</td>\n",
+ " <td id=\"T_cfd1f_row1_col6\" class=\"data row1 col6\" >-2.46</td>\n",
+ " <td id=\"T_cfd1f_row1_col7\" class=\"data row1 col7\" >3.45</td>\n",
+ " <td id=\"T_cfd1f_row1_col8\" class=\"data row1 col8\" >-1.66</td>\n",
+ " <td id=\"T_cfd1f_row1_col9\" class=\"data row1 col9\" >1.27</td>\n",
+ " <td id=\"T_cfd1f_row1_col10\" class=\"data row1 col10\" >-0.52</td>\n",
+ " <td id=\"T_cfd1f_row1_col11\" class=\"data row1 col11\" >-0.02</td>\n",
+ " <td id=\"T_cfd1f_row1_col12\" class=\"data row1 col12\" >1.52</td>\n",
+ " <td id=\"T_cfd1f_row1_col13\" class=\"data row1 col13\" >-1.09</td>\n",
+ " <td id=\"T_cfd1f_row1_col14\" class=\"data row1 col14\" >-1.86</td>\n",
+ " <td id=\"T_cfd1f_row1_col15\" class=\"data row1 col15\" >-1.13</td>\n",
+ " <td id=\"T_cfd1f_row1_col16\" class=\"data row1 col16\" >-0.68</td>\n",
+ " <td id=\"T_cfd1f_row1_col17\" class=\"data row1 col17\" >-0.81</td>\n",
+ " <td id=\"T_cfd1f_row1_col18\" class=\"data row1 col18\" >0.35</td>\n",
+ " <td id=\"T_cfd1f_row1_col19\" class=\"data row1 col19\" >-0.06</td>\n",
+ " <td id=\"T_cfd1f_row1_col20\" class=\"data row1 col20\" >1.79</td>\n",
+ " <td id=\"T_cfd1f_row1_col21\" class=\"data row1 col21\" >-2.82</td>\n",
+ " <td id=\"T_cfd1f_row1_col22\" class=\"data row1 col22\" >2.26</td>\n",
+ " <td id=\"T_cfd1f_row1_col23\" class=\"data row1 col23\" >0.78</td>\n",
+ " <td id=\"T_cfd1f_row1_col24\" class=\"data row1 col24\" >0.44</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_cfd1f_row2_col0\" class=\"data row2 col0\" >-0.65</td>\n",
+ " <td id=\"T_cfd1f_row2_col1\" class=\"data row2 col1\" >3.22</td>\n",
+ " <td id=\"T_cfd1f_row2_col2\" class=\"data row2 col2\" >-1.76</td>\n",
+ " <td id=\"T_cfd1f_row2_col3\" class=\"data row2 col3\" >0.52</td>\n",
+ " <td id=\"T_cfd1f_row2_col4\" class=\"data row2 col4\" >2.20</td>\n",
+ " <td id=\"T_cfd1f_row2_col5\" class=\"data row2 col5\" >-0.37</td>\n",
+ " <td id=\"T_cfd1f_row2_col6\" class=\"data row2 col6\" >-3.00</td>\n",
+ " <td id=\"T_cfd1f_row2_col7\" class=\"data row2 col7\" >3.73</td>\n",
+ " <td id=\"T_cfd1f_row2_col8\" class=\"data row2 col8\" >-1.87</td>\n",
+ " <td id=\"T_cfd1f_row2_col9\" class=\"data row2 col9\" >2.46</td>\n",
+ " <td id=\"T_cfd1f_row2_col10\" class=\"data row2 col10\" >0.21</td>\n",
+ " <td id=\"T_cfd1f_row2_col11\" class=\"data row2 col11\" >-0.24</td>\n",
+ " <td id=\"T_cfd1f_row2_col12\" class=\"data row2 col12\" >-0.10</td>\n",
+ " <td id=\"T_cfd1f_row2_col13\" class=\"data row2 col13\" >-0.78</td>\n",
+ " <td id=\"T_cfd1f_row2_col14\" class=\"data row2 col14\" >-3.02</td>\n",
+ " <td id=\"T_cfd1f_row2_col15\" class=\"data row2 col15\" >-0.82</td>\n",
+ " <td id=\"T_cfd1f_row2_col16\" class=\"data row2 col16\" >-0.21</td>\n",
+ " <td id=\"T_cfd1f_row2_col17\" class=\"data row2 col17\" >-0.23</td>\n",
+ " <td id=\"T_cfd1f_row2_col18\" class=\"data row2 col18\" >0.86</td>\n",
+ " <td id=\"T_cfd1f_row2_col19\" class=\"data row2 col19\" >-0.68</td>\n",
+ " <td id=\"T_cfd1f_row2_col20\" class=\"data row2 col20\" >1.45</td>\n",
+ " <td id=\"T_cfd1f_row2_col21\" class=\"data row2 col21\" >-4.89</td>\n",
+ " <td id=\"T_cfd1f_row2_col22\" class=\"data row2 col22\" >3.03</td>\n",
+ " <td id=\"T_cfd1f_row2_col23\" class=\"data row2 col23\" >1.91</td>\n",
+ " <td id=\"T_cfd1f_row2_col24\" class=\"data row2 col24\" >0.61</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_cfd1f_row3_col0\" class=\"data row3 col0\" >-1.62</td>\n",
+ " <td id=\"T_cfd1f_row3_col1\" class=\"data row3 col1\" >3.71</td>\n",
+ " <td id=\"T_cfd1f_row3_col2\" class=\"data row3 col2\" >-2.31</td>\n",
+ " <td id=\"T_cfd1f_row3_col3\" class=\"data row3 col3\" >0.43</td>\n",
+ " <td id=\"T_cfd1f_row3_col4\" class=\"data row3 col4\" >4.17</td>\n",
+ " <td id=\"T_cfd1f_row3_col5\" class=\"data row3 col5\" >-0.43</td>\n",
+ " <td id=\"T_cfd1f_row3_col6\" class=\"data row3 col6\" >-3.86</td>\n",
+ " <td id=\"T_cfd1f_row3_col7\" class=\"data row3 col7\" >4.16</td>\n",
+ " <td id=\"T_cfd1f_row3_col8\" class=\"data row3 col8\" >-2.15</td>\n",
+ " <td id=\"T_cfd1f_row3_col9\" class=\"data row3 col9\" >1.08</td>\n",
+ " <td id=\"T_cfd1f_row3_col10\" class=\"data row3 col10\" >0.12</td>\n",
+ " <td id=\"T_cfd1f_row3_col11\" class=\"data row3 col11\" >0.60</td>\n",
+ " <td id=\"T_cfd1f_row3_col12\" class=\"data row3 col12\" >-0.89</td>\n",
+ " <td id=\"T_cfd1f_row3_col13\" class=\"data row3 col13\" >0.27</td>\n",
+ " <td id=\"T_cfd1f_row3_col14\" class=\"data row3 col14\" >-3.67</td>\n",
+ " <td id=\"T_cfd1f_row3_col15\" class=\"data row3 col15\" >-2.71</td>\n",
+ " <td id=\"T_cfd1f_row3_col16\" class=\"data row3 col16\" >-0.31</td>\n",
+ " <td id=\"T_cfd1f_row3_col17\" class=\"data row3 col17\" >-1.59</td>\n",
+ " <td id=\"T_cfd1f_row3_col18\" class=\"data row3 col18\" >1.35</td>\n",
+ " <td id=\"T_cfd1f_row3_col19\" class=\"data row3 col19\" >-1.83</td>\n",
+ " <td id=\"T_cfd1f_row3_col20\" class=\"data row3 col20\" >0.91</td>\n",
+ " <td id=\"T_cfd1f_row3_col21\" class=\"data row3 col21\" >-5.80</td>\n",
+ " <td id=\"T_cfd1f_row3_col22\" class=\"data row3 col22\" >2.81</td>\n",
+ " <td id=\"T_cfd1f_row3_col23\" class=\"data row3 col23\" >2.11</td>\n",
+ " <td id=\"T_cfd1f_row3_col24\" class=\"data row3 col24\" >0.28</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_cfd1f_row4_col0\" class=\"data row4 col0\" >-3.35</td>\n",
+ " <td id=\"T_cfd1f_row4_col1\" class=\"data row4 col1\" >4.48</td>\n",
+ " <td id=\"T_cfd1f_row4_col2\" class=\"data row4 col2\" >-1.86</td>\n",
+ " <td id=\"T_cfd1f_row4_col3\" class=\"data row4 col3\" >-1.70</td>\n",
+ " <td id=\"T_cfd1f_row4_col4\" class=\"data row4 col4\" >5.19</td>\n",
+ " <td id=\"T_cfd1f_row4_col5\" class=\"data row4 col5\" >-1.02</td>\n",
+ " <td id=\"T_cfd1f_row4_col6\" class=\"data row4 col6\" >-3.81</td>\n",
+ " <td id=\"T_cfd1f_row4_col7\" class=\"data row4 col7\" >4.72</td>\n",
+ " <td id=\"T_cfd1f_row4_col8\" class=\"data row4 col8\" >-0.72</td>\n",
+ " <td id=\"T_cfd1f_row4_col9\" class=\"data row4 col9\" >1.08</td>\n",
+ " <td id=\"T_cfd1f_row4_col10\" class=\"data row4 col10\" >-0.18</td>\n",
+ " <td id=\"T_cfd1f_row4_col11\" class=\"data row4 col11\" >0.83</td>\n",
+ " <td id=\"T_cfd1f_row4_col12\" class=\"data row4 col12\" >-0.22</td>\n",
+ " <td id=\"T_cfd1f_row4_col13\" class=\"data row4 col13\" >-1.08</td>\n",
+ " <td id=\"T_cfd1f_row4_col14\" class=\"data row4 col14\" >-4.27</td>\n",
+ " <td id=\"T_cfd1f_row4_col15\" class=\"data row4 col15\" >-2.88</td>\n",
+ " <td id=\"T_cfd1f_row4_col16\" class=\"data row4 col16\" >-0.97</td>\n",
+ " <td id=\"T_cfd1f_row4_col17\" class=\"data row4 col17\" >-1.78</td>\n",
+ " <td id=\"T_cfd1f_row4_col18\" class=\"data row4 col18\" >1.53</td>\n",
+ " <td id=\"T_cfd1f_row4_col19\" class=\"data row4 col19\" >-1.80</td>\n",
+ " <td id=\"T_cfd1f_row4_col20\" class=\"data row4 col20\" >2.21</td>\n",
+ " <td id=\"T_cfd1f_row4_col21\" class=\"data row4 col21\" >-6.34</td>\n",
+ " <td id=\"T_cfd1f_row4_col22\" class=\"data row4 col22\" >3.34</td>\n",
+ " <td id=\"T_cfd1f_row4_col23\" class=\"data row4 col23\" >2.49</td>\n",
+ " <td id=\"T_cfd1f_row4_col24\" class=\"data row4 col24\" >2.09</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_cfd1f_row5_col0\" class=\"data row5 col0\" >-0.84</td>\n",
+ " <td id=\"T_cfd1f_row5_col1\" class=\"data row5 col1\" >4.23</td>\n",
+ " <td id=\"T_cfd1f_row5_col2\" class=\"data row5 col2\" >-1.65</td>\n",
+ " <td id=\"T_cfd1f_row5_col3\" class=\"data row5 col3\" >-2.00</td>\n",
+ " <td id=\"T_cfd1f_row5_col4\" class=\"data row5 col4\" >5.34</td>\n",
+ " <td id=\"T_cfd1f_row5_col5\" class=\"data row5 col5\" >-0.99</td>\n",
+ " <td id=\"T_cfd1f_row5_col6\" class=\"data row5 col6\" >-4.13</td>\n",
+ " <td id=\"T_cfd1f_row5_col7\" class=\"data row5 col7\" >3.94</td>\n",
+ " <td id=\"T_cfd1f_row5_col8\" class=\"data row5 col8\" >-1.06</td>\n",
+ " <td id=\"T_cfd1f_row5_col9\" class=\"data row5 col9\" >-0.94</td>\n",
+ " <td id=\"T_cfd1f_row5_col10\" class=\"data row5 col10\" >1.24</td>\n",
+ " <td id=\"T_cfd1f_row5_col11\" class=\"data row5 col11\" >0.09</td>\n",
+ " <td id=\"T_cfd1f_row5_col12\" class=\"data row5 col12\" >-1.78</td>\n",
+ " <td id=\"T_cfd1f_row5_col13\" class=\"data row5 col13\" >-0.11</td>\n",
+ " <td id=\"T_cfd1f_row5_col14\" class=\"data row5 col14\" >-4.45</td>\n",
+ " <td id=\"T_cfd1f_row5_col15\" class=\"data row5 col15\" >-0.85</td>\n",
+ " <td id=\"T_cfd1f_row5_col16\" class=\"data row5 col16\" >-2.06</td>\n",
+ " <td id=\"T_cfd1f_row5_col17\" class=\"data row5 col17\" >-1.35</td>\n",
+ " <td id=\"T_cfd1f_row5_col18\" class=\"data row5 col18\" >0.80</td>\n",
+ " <td id=\"T_cfd1f_row5_col19\" class=\"data row5 col19\" >-1.63</td>\n",
+ " <td id=\"T_cfd1f_row5_col20\" class=\"data row5 col20\" >1.54</td>\n",
+ " <td id=\"T_cfd1f_row5_col21\" class=\"data row5 col21\" >-6.51</td>\n",
+ " <td id=\"T_cfd1f_row5_col22\" class=\"data row5 col22\" >2.80</td>\n",
+ " <td id=\"T_cfd1f_row5_col23\" class=\"data row5 col23\" >2.14</td>\n",
+ " <td id=\"T_cfd1f_row5_col24\" class=\"data row5 col24\" >3.77</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_cfd1f_row6_col0\" class=\"data row6 col0\" >-0.74</td>\n",
+ " <td id=\"T_cfd1f_row6_col1\" class=\"data row6 col1\" >5.35</td>\n",
+ " <td id=\"T_cfd1f_row6_col2\" class=\"data row6 col2\" >-2.11</td>\n",
+ " <td id=\"T_cfd1f_row6_col3\" class=\"data row6 col3\" >-1.13</td>\n",
+ " <td id=\"T_cfd1f_row6_col4\" class=\"data row6 col4\" >4.20</td>\n",
+ " <td id=\"T_cfd1f_row6_col5\" class=\"data row6 col5\" >-1.85</td>\n",
+ " <td id=\"T_cfd1f_row6_col6\" class=\"data row6 col6\" >-3.20</td>\n",
+ " <td id=\"T_cfd1f_row6_col7\" class=\"data row6 col7\" >3.76</td>\n",
+ " <td id=\"T_cfd1f_row6_col8\" class=\"data row6 col8\" >-3.22</td>\n",
+ " <td id=\"T_cfd1f_row6_col9\" class=\"data row6 col9\" >-1.23</td>\n",
+ " <td id=\"T_cfd1f_row6_col10\" class=\"data row6 col10\" >0.34</td>\n",
+ " <td id=\"T_cfd1f_row6_col11\" class=\"data row6 col11\" >0.57</td>\n",
+ " <td id=\"T_cfd1f_row6_col12\" class=\"data row6 col12\" >-1.82</td>\n",
+ " <td id=\"T_cfd1f_row6_col13\" class=\"data row6 col13\" >0.54</td>\n",
+ " <td id=\"T_cfd1f_row6_col14\" class=\"data row6 col14\" >-4.43</td>\n",
+ " <td id=\"T_cfd1f_row6_col15\" class=\"data row6 col15\" >-1.83</td>\n",
+ " <td id=\"T_cfd1f_row6_col16\" class=\"data row6 col16\" >-4.03</td>\n",
+ " <td id=\"T_cfd1f_row6_col17\" class=\"data row6 col17\" >-2.62</td>\n",
+ " <td id=\"T_cfd1f_row6_col18\" class=\"data row6 col18\" >-0.20</td>\n",
+ " <td id=\"T_cfd1f_row6_col19\" class=\"data row6 col19\" >-4.68</td>\n",
+ " <td id=\"T_cfd1f_row6_col20\" class=\"data row6 col20\" >1.93</td>\n",
+ " <td id=\"T_cfd1f_row6_col21\" class=\"data row6 col21\" >-8.46</td>\n",
+ " <td id=\"T_cfd1f_row6_col22\" class=\"data row6 col22\" >3.34</td>\n",
+ " <td id=\"T_cfd1f_row6_col23\" class=\"data row6 col23\" >2.52</td>\n",
+ " <td id=\"T_cfd1f_row6_col24\" class=\"data row6 col24\" >5.81</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_cfd1f_row7_col0\" class=\"data row7 col0\" >-0.44</td>\n",
+ " <td id=\"T_cfd1f_row7_col1\" class=\"data row7 col1\" >4.69</td>\n",
+ " <td id=\"T_cfd1f_row7_col2\" class=\"data row7 col2\" >-2.30</td>\n",
+ " <td id=\"T_cfd1f_row7_col3\" class=\"data row7 col3\" >-0.21</td>\n",
+ " <td id=\"T_cfd1f_row7_col4\" class=\"data row7 col4\" >5.93</td>\n",
+ " <td id=\"T_cfd1f_row7_col5\" class=\"data row7 col5\" >-2.63</td>\n",
+ " <td id=\"T_cfd1f_row7_col6\" class=\"data row7 col6\" >-1.83</td>\n",
+ " <td id=\"T_cfd1f_row7_col7\" class=\"data row7 col7\" >5.46</td>\n",
+ " <td id=\"T_cfd1f_row7_col8\" class=\"data row7 col8\" >-4.50</td>\n",
+ " <td id=\"T_cfd1f_row7_col9\" class=\"data row7 col9\" >-3.16</td>\n",
+ " <td id=\"T_cfd1f_row7_col10\" class=\"data row7 col10\" >-1.73</td>\n",
+ " <td id=\"T_cfd1f_row7_col11\" class=\"data row7 col11\" >0.18</td>\n",
+ " <td id=\"T_cfd1f_row7_col12\" class=\"data row7 col12\" >0.11</td>\n",
+ " <td id=\"T_cfd1f_row7_col13\" class=\"data row7 col13\" >0.04</td>\n",
+ " <td id=\"T_cfd1f_row7_col14\" class=\"data row7 col14\" >-5.99</td>\n",
+ " <td id=\"T_cfd1f_row7_col15\" class=\"data row7 col15\" >-0.45</td>\n",
+ " <td id=\"T_cfd1f_row7_col16\" class=\"data row7 col16\" >-6.20</td>\n",
+ " <td id=\"T_cfd1f_row7_col17\" class=\"data row7 col17\" >-3.89</td>\n",
+ " <td id=\"T_cfd1f_row7_col18\" class=\"data row7 col18\" >0.71</td>\n",
+ " <td id=\"T_cfd1f_row7_col19\" class=\"data row7 col19\" >-3.95</td>\n",
+ " <td id=\"T_cfd1f_row7_col20\" class=\"data row7 col20\" >0.67</td>\n",
+ " <td id=\"T_cfd1f_row7_col21\" class=\"data row7 col21\" >-7.26</td>\n",
+ " <td id=\"T_cfd1f_row7_col22\" class=\"data row7 col22\" >2.97</td>\n",
+ " <td id=\"T_cfd1f_row7_col23\" class=\"data row7 col23\" >3.39</td>\n",
+ " <td id=\"T_cfd1f_row7_col24\" class=\"data row7 col24\" >6.66</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_cfd1f_row8_col0\" class=\"data row8 col0\" >0.92</td>\n",
+ " <td id=\"T_cfd1f_row8_col1\" class=\"data row8 col1\" >5.80</td>\n",
+ " <td id=\"T_cfd1f_row8_col2\" class=\"data row8 col2\" >-3.33</td>\n",
+ " <td id=\"T_cfd1f_row8_col3\" class=\"data row8 col3\" >-0.65</td>\n",
+ " <td id=\"T_cfd1f_row8_col4\" class=\"data row8 col4\" >5.99</td>\n",
+ " <td id=\"T_cfd1f_row8_col5\" class=\"data row8 col5\" >-3.19</td>\n",
+ " <td id=\"T_cfd1f_row8_col6\" class=\"data row8 col6\" >-1.83</td>\n",
+ " <td id=\"T_cfd1f_row8_col7\" class=\"data row8 col7\" >5.63</td>\n",
+ " <td id=\"T_cfd1f_row8_col8\" class=\"data row8 col8\" >-3.53</td>\n",
+ " <td id=\"T_cfd1f_row8_col9\" class=\"data row8 col9\" >-1.30</td>\n",
+ " <td id=\"T_cfd1f_row8_col10\" class=\"data row8 col10\" >-1.61</td>\n",
+ " <td id=\"T_cfd1f_row8_col11\" class=\"data row8 col11\" >0.82</td>\n",
+ " <td id=\"T_cfd1f_row8_col12\" class=\"data row8 col12\" >-2.45</td>\n",
+ " <td id=\"T_cfd1f_row8_col13\" class=\"data row8 col13\" >-0.40</td>\n",
+ " <td id=\"T_cfd1f_row8_col14\" class=\"data row8 col14\" >-6.06</td>\n",
+ " <td id=\"T_cfd1f_row8_col15\" class=\"data row8 col15\" >-0.52</td>\n",
+ " <td id=\"T_cfd1f_row8_col16\" class=\"data row8 col16\" >-6.60</td>\n",
+ " <td id=\"T_cfd1f_row8_col17\" class=\"data row8 col17\" >-3.48</td>\n",
+ " <td id=\"T_cfd1f_row8_col18\" class=\"data row8 col18\" >-0.04</td>\n",
+ " <td id=\"T_cfd1f_row8_col19\" class=\"data row8 col19\" >-4.60</td>\n",
+ " <td id=\"T_cfd1f_row8_col20\" class=\"data row8 col20\" >0.51</td>\n",
+ " <td id=\"T_cfd1f_row8_col21\" class=\"data row8 col21\" >-5.85</td>\n",
+ " <td id=\"T_cfd1f_row8_col22\" class=\"data row8 col22\" >3.23</td>\n",
+ " <td id=\"T_cfd1f_row8_col23\" class=\"data row8 col23\" >2.40</td>\n",
+ " <td id=\"T_cfd1f_row8_col24\" class=\"data row8 col24\" >5.08</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_cfd1f_row9_col0\" class=\"data row9 col0\" >0.38</td>\n",
+ " <td id=\"T_cfd1f_row9_col1\" class=\"data row9 col1\" >5.54</td>\n",
+ " <td id=\"T_cfd1f_row9_col2\" class=\"data row9 col2\" >-4.49</td>\n",
+ " <td id=\"T_cfd1f_row9_col3\" class=\"data row9 col3\" >-0.80</td>\n",
+ " <td id=\"T_cfd1f_row9_col4\" class=\"data row9 col4\" >7.05</td>\n",
+ " <td id=\"T_cfd1f_row9_col5\" class=\"data row9 col5\" >-2.64</td>\n",
+ " <td id=\"T_cfd1f_row9_col6\" class=\"data row9 col6\" >-0.44</td>\n",
+ " <td id=\"T_cfd1f_row9_col7\" class=\"data row9 col7\" >5.35</td>\n",
+ " <td id=\"T_cfd1f_row9_col8\" class=\"data row9 col8\" >-1.96</td>\n",
+ " <td id=\"T_cfd1f_row9_col9\" class=\"data row9 col9\" >-0.33</td>\n",
+ " <td id=\"T_cfd1f_row9_col10\" class=\"data row9 col10\" >-0.80</td>\n",
+ " <td id=\"T_cfd1f_row9_col11\" class=\"data row9 col11\" >0.26</td>\n",
+ " <td id=\"T_cfd1f_row9_col12\" class=\"data row9 col12\" >-3.37</td>\n",
+ " <td id=\"T_cfd1f_row9_col13\" class=\"data row9 col13\" >-0.82</td>\n",
+ " <td id=\"T_cfd1f_row9_col14\" class=\"data row9 col14\" >-6.05</td>\n",
+ " <td id=\"T_cfd1f_row9_col15\" class=\"data row9 col15\" >-2.61</td>\n",
+ " <td id=\"T_cfd1f_row9_col16\" class=\"data row9 col16\" >-8.45</td>\n",
+ " <td id=\"T_cfd1f_row9_col17\" class=\"data row9 col17\" >-4.45</td>\n",
+ " <td id=\"T_cfd1f_row9_col18\" class=\"data row9 col18\" >0.41</td>\n",
+ " <td id=\"T_cfd1f_row9_col19\" class=\"data row9 col19\" >-4.71</td>\n",
+ " <td id=\"T_cfd1f_row9_col20\" class=\"data row9 col20\" >1.89</td>\n",
+ " <td id=\"T_cfd1f_row9_col21\" class=\"data row9 col21\" >-6.93</td>\n",
+ " <td id=\"T_cfd1f_row9_col22\" class=\"data row9 col22\" >2.14</td>\n",
+ " <td id=\"T_cfd1f_row9_col23\" class=\"data row9 col23\" >3.00</td>\n",
+ " <td id=\"T_cfd1f_row9_col24\" class=\"data row9 col24\" >5.16</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row10\" class=\"row_heading level0 row10\" >10</th>\n",
+ " <td id=\"T_cfd1f_row10_col0\" class=\"data row10 col0\" >2.06</td>\n",
+ " <td id=\"T_cfd1f_row10_col1\" class=\"data row10 col1\" >5.84</td>\n",
+ " <td id=\"T_cfd1f_row10_col2\" class=\"data row10 col2\" >-3.90</td>\n",
+ " <td id=\"T_cfd1f_row10_col3\" class=\"data row10 col3\" >-0.98</td>\n",
+ " <td id=\"T_cfd1f_row10_col4\" class=\"data row10 col4\" >7.78</td>\n",
+ " <td id=\"T_cfd1f_row10_col5\" class=\"data row10 col5\" >-2.49</td>\n",
+ " <td id=\"T_cfd1f_row10_col6\" class=\"data row10 col6\" >-0.59</td>\n",
+ " <td id=\"T_cfd1f_row10_col7\" class=\"data row10 col7\" >5.59</td>\n",
+ " <td id=\"T_cfd1f_row10_col8\" class=\"data row10 col8\" >-2.22</td>\n",
+ " <td id=\"T_cfd1f_row10_col9\" class=\"data row10 col9\" >-0.71</td>\n",
+ " <td id=\"T_cfd1f_row10_col10\" class=\"data row10 col10\" >-0.46</td>\n",
+ " <td id=\"T_cfd1f_row10_col11\" class=\"data row10 col11\" >1.80</td>\n",
+ " <td id=\"T_cfd1f_row10_col12\" class=\"data row10 col12\" >-2.79</td>\n",
+ " <td id=\"T_cfd1f_row10_col13\" class=\"data row10 col13\" >0.48</td>\n",
+ " <td id=\"T_cfd1f_row10_col14\" class=\"data row10 col14\" >-5.97</td>\n",
+ " <td id=\"T_cfd1f_row10_col15\" class=\"data row10 col15\" >-3.44</td>\n",
+ " <td id=\"T_cfd1f_row10_col16\" class=\"data row10 col16\" >-7.77</td>\n",
+ " <td id=\"T_cfd1f_row10_col17\" class=\"data row10 col17\" >-5.49</td>\n",
+ " <td id=\"T_cfd1f_row10_col18\" class=\"data row10 col18\" >-0.70</td>\n",
+ " <td id=\"T_cfd1f_row10_col19\" class=\"data row10 col19\" >-4.61</td>\n",
+ " <td id=\"T_cfd1f_row10_col20\" class=\"data row10 col20\" >-0.52</td>\n",
+ " <td id=\"T_cfd1f_row10_col21\" class=\"data row10 col21\" >-7.72</td>\n",
+ " <td id=\"T_cfd1f_row10_col22\" class=\"data row10 col22\" >1.54</td>\n",
+ " <td id=\"T_cfd1f_row10_col23\" class=\"data row10 col23\" >5.02</td>\n",
+ " <td id=\"T_cfd1f_row10_col24\" class=\"data row10 col24\" >5.81</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row11\" class=\"row_heading level0 row11\" >11</th>\n",
+ " <td id=\"T_cfd1f_row11_col0\" class=\"data row11 col0\" >1.86</td>\n",
+ " <td id=\"T_cfd1f_row11_col1\" class=\"data row11 col1\" >4.47</td>\n",
+ " <td id=\"T_cfd1f_row11_col2\" class=\"data row11 col2\" >-2.17</td>\n",
+ " <td id=\"T_cfd1f_row11_col3\" class=\"data row11 col3\" >-1.38</td>\n",
+ " <td id=\"T_cfd1f_row11_col4\" class=\"data row11 col4\" >5.90</td>\n",
+ " <td id=\"T_cfd1f_row11_col5\" class=\"data row11 col5\" >-0.49</td>\n",
+ " <td id=\"T_cfd1f_row11_col6\" class=\"data row11 col6\" >0.02</td>\n",
+ " <td id=\"T_cfd1f_row11_col7\" class=\"data row11 col7\" >5.78</td>\n",
+ " <td id=\"T_cfd1f_row11_col8\" class=\"data row11 col8\" >-1.04</td>\n",
+ " <td id=\"T_cfd1f_row11_col9\" class=\"data row11 col9\" >-0.60</td>\n",
+ " <td id=\"T_cfd1f_row11_col10\" class=\"data row11 col10\" >0.49</td>\n",
+ " <td id=\"T_cfd1f_row11_col11\" class=\"data row11 col11\" >1.96</td>\n",
+ " <td id=\"T_cfd1f_row11_col12\" class=\"data row11 col12\" >-1.47</td>\n",
+ " <td id=\"T_cfd1f_row11_col13\" class=\"data row11 col13\" >1.88</td>\n",
+ " <td id=\"T_cfd1f_row11_col14\" class=\"data row11 col14\" >-5.92</td>\n",
+ " <td id=\"T_cfd1f_row11_col15\" class=\"data row11 col15\" >-4.55</td>\n",
+ " <td id=\"T_cfd1f_row11_col16\" class=\"data row11 col16\" >-8.15</td>\n",
+ " <td id=\"T_cfd1f_row11_col17\" class=\"data row11 col17\" >-3.42</td>\n",
+ " <td id=\"T_cfd1f_row11_col18\" class=\"data row11 col18\" >-2.24</td>\n",
+ " <td id=\"T_cfd1f_row11_col19\" class=\"data row11 col19\" >-4.33</td>\n",
+ " <td id=\"T_cfd1f_row11_col20\" class=\"data row11 col20\" >-1.17</td>\n",
+ " <td id=\"T_cfd1f_row11_col21\" class=\"data row11 col21\" >-7.90</td>\n",
+ " <td id=\"T_cfd1f_row11_col22\" class=\"data row11 col22\" >1.36</td>\n",
+ " <td id=\"T_cfd1f_row11_col23\" class=\"data row11 col23\" >5.31</td>\n",
+ " <td id=\"T_cfd1f_row11_col24\" class=\"data row11 col24\" >5.83</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row12\" class=\"row_heading level0 row12\" >12</th>\n",
+ " <td id=\"T_cfd1f_row12_col0\" class=\"data row12 col0\" >3.19</td>\n",
+ " <td id=\"T_cfd1f_row12_col1\" class=\"data row12 col1\" >4.22</td>\n",
+ " <td id=\"T_cfd1f_row12_col2\" class=\"data row12 col2\" >-3.06</td>\n",
+ " <td id=\"T_cfd1f_row12_col3\" class=\"data row12 col3\" >-2.27</td>\n",
+ " <td id=\"T_cfd1f_row12_col4\" class=\"data row12 col4\" >5.93</td>\n",
+ " <td id=\"T_cfd1f_row12_col5\" class=\"data row12 col5\" >-2.64</td>\n",
+ " <td id=\"T_cfd1f_row12_col6\" class=\"data row12 col6\" >0.33</td>\n",
+ " <td id=\"T_cfd1f_row12_col7\" class=\"data row12 col7\" >6.72</td>\n",
+ " <td id=\"T_cfd1f_row12_col8\" class=\"data row12 col8\" >-2.84</td>\n",
+ " <td id=\"T_cfd1f_row12_col9\" class=\"data row12 col9\" >-0.20</td>\n",
+ " <td id=\"T_cfd1f_row12_col10\" class=\"data row12 col10\" >1.89</td>\n",
+ " <td id=\"T_cfd1f_row12_col11\" class=\"data row12 col11\" >2.63</td>\n",
+ " <td id=\"T_cfd1f_row12_col12\" class=\"data row12 col12\" >-1.53</td>\n",
+ " <td id=\"T_cfd1f_row12_col13\" class=\"data row12 col13\" >0.75</td>\n",
+ " <td id=\"T_cfd1f_row12_col14\" class=\"data row12 col14\" >-5.27</td>\n",
+ " <td id=\"T_cfd1f_row12_col15\" class=\"data row12 col15\" >-4.53</td>\n",
+ " <td id=\"T_cfd1f_row12_col16\" class=\"data row12 col16\" >-7.57</td>\n",
+ " <td id=\"T_cfd1f_row12_col17\" class=\"data row12 col17\" >-2.85</td>\n",
+ " <td id=\"T_cfd1f_row12_col18\" class=\"data row12 col18\" >-2.17</td>\n",
+ " <td id=\"T_cfd1f_row12_col19\" class=\"data row12 col19\" >-4.78</td>\n",
+ " <td id=\"T_cfd1f_row12_col20\" class=\"data row12 col20\" >-1.13</td>\n",
+ " <td id=\"T_cfd1f_row12_col21\" class=\"data row12 col21\" >-8.99</td>\n",
+ " <td id=\"T_cfd1f_row12_col22\" class=\"data row12 col22\" >2.11</td>\n",
+ " <td id=\"T_cfd1f_row12_col23\" class=\"data row12 col23\" >6.42</td>\n",
+ " <td id=\"T_cfd1f_row12_col24\" class=\"data row12 col24\" >5.60</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row13\" class=\"row_heading level0 row13\" >13</th>\n",
+ " <td id=\"T_cfd1f_row13_col0\" class=\"data row13 col0\" >2.31</td>\n",
+ " <td id=\"T_cfd1f_row13_col1\" class=\"data row13 col1\" >4.45</td>\n",
+ " <td id=\"T_cfd1f_row13_col2\" class=\"data row13 col2\" >-3.87</td>\n",
+ " <td id=\"T_cfd1f_row13_col3\" class=\"data row13 col3\" >-2.05</td>\n",
+ " <td id=\"T_cfd1f_row13_col4\" class=\"data row13 col4\" >6.76</td>\n",
+ " <td id=\"T_cfd1f_row13_col5\" class=\"data row13 col5\" >-3.25</td>\n",
+ " <td id=\"T_cfd1f_row13_col6\" class=\"data row13 col6\" >-2.17</td>\n",
+ " <td id=\"T_cfd1f_row13_col7\" class=\"data row13 col7\" >7.99</td>\n",
+ " <td id=\"T_cfd1f_row13_col8\" class=\"data row13 col8\" >-2.56</td>\n",
+ " <td id=\"T_cfd1f_row13_col9\" class=\"data row13 col9\" >-0.80</td>\n",
+ " <td id=\"T_cfd1f_row13_col10\" class=\"data row13 col10\" >0.71</td>\n",
+ " <td id=\"T_cfd1f_row13_col11\" class=\"data row13 col11\" >2.33</td>\n",
+ " <td id=\"T_cfd1f_row13_col12\" class=\"data row13 col12\" >-0.16</td>\n",
+ " <td id=\"T_cfd1f_row13_col13\" class=\"data row13 col13\" >-0.46</td>\n",
+ " <td id=\"T_cfd1f_row13_col14\" class=\"data row13 col14\" >-5.10</td>\n",
+ " <td id=\"T_cfd1f_row13_col15\" class=\"data row13 col15\" >-3.79</td>\n",
+ " <td id=\"T_cfd1f_row13_col16\" class=\"data row13 col16\" >-7.58</td>\n",
+ " <td id=\"T_cfd1f_row13_col17\" class=\"data row13 col17\" >-4.00</td>\n",
+ " <td id=\"T_cfd1f_row13_col18\" class=\"data row13 col18\" >0.33</td>\n",
+ " <td id=\"T_cfd1f_row13_col19\" class=\"data row13 col19\" >-3.67</td>\n",
+ " <td id=\"T_cfd1f_row13_col20\" class=\"data row13 col20\" >-1.05</td>\n",
+ " <td id=\"T_cfd1f_row13_col21\" class=\"data row13 col21\" >-8.71</td>\n",
+ " <td id=\"T_cfd1f_row13_col22\" class=\"data row13 col22\" >2.47</td>\n",
+ " <td id=\"T_cfd1f_row13_col23\" class=\"data row13 col23\" >5.87</td>\n",
+ " <td id=\"T_cfd1f_row13_col24\" class=\"data row13 col24\" >6.71</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row14\" class=\"row_heading level0 row14\" >14</th>\n",
+ " <td id=\"T_cfd1f_row14_col0\" class=\"data row14 col0\" >3.78</td>\n",
+ " <td id=\"T_cfd1f_row14_col1\" class=\"data row14 col1\" >4.33</td>\n",
+ " <td id=\"T_cfd1f_row14_col2\" class=\"data row14 col2\" >-3.88</td>\n",
+ " <td id=\"T_cfd1f_row14_col3\" class=\"data row14 col3\" >-1.58</td>\n",
+ " <td id=\"T_cfd1f_row14_col4\" class=\"data row14 col4\" >6.22</td>\n",
+ " <td id=\"T_cfd1f_row14_col5\" class=\"data row14 col5\" >-3.23</td>\n",
+ " <td id=\"T_cfd1f_row14_col6\" class=\"data row14 col6\" >-1.46</td>\n",
+ " <td id=\"T_cfd1f_row14_col7\" class=\"data row14 col7\" >5.57</td>\n",
+ " <td id=\"T_cfd1f_row14_col8\" class=\"data row14 col8\" >-2.93</td>\n",
+ " <td id=\"T_cfd1f_row14_col9\" class=\"data row14 col9\" >-0.33</td>\n",
+ " <td id=\"T_cfd1f_row14_col10\" class=\"data row14 col10\" >-0.97</td>\n",
+ " <td id=\"T_cfd1f_row14_col11\" class=\"data row14 col11\" >1.72</td>\n",
+ " <td id=\"T_cfd1f_row14_col12\" class=\"data row14 col12\" >3.61</td>\n",
+ " <td id=\"T_cfd1f_row14_col13\" class=\"data row14 col13\" >0.29</td>\n",
+ " <td id=\"T_cfd1f_row14_col14\" class=\"data row14 col14\" >-4.21</td>\n",
+ " <td id=\"T_cfd1f_row14_col15\" class=\"data row14 col15\" >-4.10</td>\n",
+ " <td id=\"T_cfd1f_row14_col16\" class=\"data row14 col16\" >-6.68</td>\n",
+ " <td id=\"T_cfd1f_row14_col17\" class=\"data row14 col17\" >-4.50</td>\n",
+ " <td id=\"T_cfd1f_row14_col18\" class=\"data row14 col18\" >-2.19</td>\n",
+ " <td id=\"T_cfd1f_row14_col19\" class=\"data row14 col19\" >-2.43</td>\n",
+ " <td id=\"T_cfd1f_row14_col20\" class=\"data row14 col20\" >-1.64</td>\n",
+ " <td id=\"T_cfd1f_row14_col21\" class=\"data row14 col21\" >-9.36</td>\n",
+ " <td id=\"T_cfd1f_row14_col22\" class=\"data row14 col22\" >3.36</td>\n",
+ " <td id=\"T_cfd1f_row14_col23\" class=\"data row14 col23\" >6.11</td>\n",
+ " <td id=\"T_cfd1f_row14_col24\" class=\"data row14 col24\" >7.53</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row15\" class=\"row_heading level0 row15\" >15</th>\n",
+ " <td id=\"T_cfd1f_row15_col0\" class=\"data row15 col0\" >5.64</td>\n",
+ " <td id=\"T_cfd1f_row15_col1\" class=\"data row15 col1\" >5.31</td>\n",
+ " <td id=\"T_cfd1f_row15_col2\" class=\"data row15 col2\" >-3.98</td>\n",
+ " <td id=\"T_cfd1f_row15_col3\" class=\"data row15 col3\" >-2.26</td>\n",
+ " <td id=\"T_cfd1f_row15_col4\" class=\"data row15 col4\" >5.91</td>\n",
+ " <td id=\"T_cfd1f_row15_col5\" class=\"data row15 col5\" >-3.30</td>\n",
+ " <td id=\"T_cfd1f_row15_col6\" class=\"data row15 col6\" >-1.03</td>\n",
+ " <td id=\"T_cfd1f_row15_col7\" class=\"data row15 col7\" >5.68</td>\n",
+ " <td id=\"T_cfd1f_row15_col8\" class=\"data row15 col8\" >-3.06</td>\n",
+ " <td id=\"T_cfd1f_row15_col9\" class=\"data row15 col9\" >-0.33</td>\n",
+ " <td id=\"T_cfd1f_row15_col10\" class=\"data row15 col10\" >-1.16</td>\n",
+ " <td id=\"T_cfd1f_row15_col11\" class=\"data row15 col11\" >2.19</td>\n",
+ " <td id=\"T_cfd1f_row15_col12\" class=\"data row15 col12\" >4.20</td>\n",
+ " <td id=\"T_cfd1f_row15_col13\" class=\"data row15 col13\" >1.01</td>\n",
+ " <td id=\"T_cfd1f_row15_col14\" class=\"data row15 col14\" >-3.22</td>\n",
+ " <td id=\"T_cfd1f_row15_col15\" class=\"data row15 col15\" >-4.31</td>\n",
+ " <td id=\"T_cfd1f_row15_col16\" class=\"data row15 col16\" >-5.74</td>\n",
+ " <td id=\"T_cfd1f_row15_col17\" class=\"data row15 col17\" >-4.44</td>\n",
+ " <td id=\"T_cfd1f_row15_col18\" class=\"data row15 col18\" >-2.30</td>\n",
+ " <td id=\"T_cfd1f_row15_col19\" class=\"data row15 col19\" >-1.36</td>\n",
+ " <td id=\"T_cfd1f_row15_col20\" class=\"data row15 col20\" >-1.20</td>\n",
+ " <td id=\"T_cfd1f_row15_col21\" class=\"data row15 col21\" >-11.27</td>\n",
+ " <td id=\"T_cfd1f_row15_col22\" class=\"data row15 col22\" >2.59</td>\n",
+ " <td id=\"T_cfd1f_row15_col23\" class=\"data row15 col23\" >6.69</td>\n",
+ " <td id=\"T_cfd1f_row15_col24\" class=\"data row15 col24\" >5.91</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row16\" class=\"row_heading level0 row16\" >16</th>\n",
+ " <td id=\"T_cfd1f_row16_col0\" class=\"data row16 col0\" >4.08</td>\n",
+ " <td id=\"T_cfd1f_row16_col1\" class=\"data row16 col1\" >4.34</td>\n",
+ " <td id=\"T_cfd1f_row16_col2\" class=\"data row16 col2\" >-2.44</td>\n",
+ " <td id=\"T_cfd1f_row16_col3\" class=\"data row16 col3\" >-3.30</td>\n",
+ " <td id=\"T_cfd1f_row16_col4\" class=\"data row16 col4\" >6.04</td>\n",
+ " <td id=\"T_cfd1f_row16_col5\" class=\"data row16 col5\" >-2.52</td>\n",
+ " <td id=\"T_cfd1f_row16_col6\" class=\"data row16 col6\" >-0.47</td>\n",
+ " <td id=\"T_cfd1f_row16_col7\" class=\"data row16 col7\" >5.28</td>\n",
+ " <td id=\"T_cfd1f_row16_col8\" class=\"data row16 col8\" >-4.84</td>\n",
+ " <td id=\"T_cfd1f_row16_col9\" class=\"data row16 col9\" >1.58</td>\n",
+ " <td id=\"T_cfd1f_row16_col10\" class=\"data row16 col10\" >0.23</td>\n",
+ " <td id=\"T_cfd1f_row16_col11\" class=\"data row16 col11\" >0.10</td>\n",
+ " <td id=\"T_cfd1f_row16_col12\" class=\"data row16 col12\" >5.79</td>\n",
+ " <td id=\"T_cfd1f_row16_col13\" class=\"data row16 col13\" >1.80</td>\n",
+ " <td id=\"T_cfd1f_row16_col14\" class=\"data row16 col14\" >-3.13</td>\n",
+ " <td id=\"T_cfd1f_row16_col15\" class=\"data row16 col15\" >-3.85</td>\n",
+ " <td id=\"T_cfd1f_row16_col16\" class=\"data row16 col16\" >-5.53</td>\n",
+ " <td id=\"T_cfd1f_row16_col17\" class=\"data row16 col17\" >-2.97</td>\n",
+ " <td id=\"T_cfd1f_row16_col18\" class=\"data row16 col18\" >-2.13</td>\n",
+ " <td id=\"T_cfd1f_row16_col19\" class=\"data row16 col19\" >-1.15</td>\n",
+ " <td id=\"T_cfd1f_row16_col20\" class=\"data row16 col20\" >-0.56</td>\n",
+ " <td id=\"T_cfd1f_row16_col21\" class=\"data row16 col21\" >-13.13</td>\n",
+ " <td id=\"T_cfd1f_row16_col22\" class=\"data row16 col22\" >2.07</td>\n",
+ " <td id=\"T_cfd1f_row16_col23\" class=\"data row16 col23\" >6.16</td>\n",
+ " <td id=\"T_cfd1f_row16_col24\" class=\"data row16 col24\" >4.94</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row17\" class=\"row_heading level0 row17\" >17</th>\n",
+ " <td id=\"T_cfd1f_row17_col0\" class=\"data row17 col0\" >5.64</td>\n",
+ " <td id=\"T_cfd1f_row17_col1\" class=\"data row17 col1\" >4.57</td>\n",
+ " <td id=\"T_cfd1f_row17_col2\" class=\"data row17 col2\" >-3.53</td>\n",
+ " <td id=\"T_cfd1f_row17_col3\" class=\"data row17 col3\" >-3.76</td>\n",
+ " <td id=\"T_cfd1f_row17_col4\" class=\"data row17 col4\" >6.58</td>\n",
+ " <td id=\"T_cfd1f_row17_col5\" class=\"data row17 col5\" >-2.58</td>\n",
+ " <td id=\"T_cfd1f_row17_col6\" class=\"data row17 col6\" >-0.75</td>\n",
+ " <td id=\"T_cfd1f_row17_col7\" class=\"data row17 col7\" >6.58</td>\n",
+ " <td id=\"T_cfd1f_row17_col8\" class=\"data row17 col8\" >-4.78</td>\n",
+ " <td id=\"T_cfd1f_row17_col9\" class=\"data row17 col9\" >3.63</td>\n",
+ " <td id=\"T_cfd1f_row17_col10\" class=\"data row17 col10\" >-0.29</td>\n",
+ " <td id=\"T_cfd1f_row17_col11\" class=\"data row17 col11\" >0.56</td>\n",
+ " <td id=\"T_cfd1f_row17_col12\" class=\"data row17 col12\" >5.76</td>\n",
+ " <td id=\"T_cfd1f_row17_col13\" class=\"data row17 col13\" >2.05</td>\n",
+ " <td id=\"T_cfd1f_row17_col14\" class=\"data row17 col14\" >-2.27</td>\n",
+ " <td id=\"T_cfd1f_row17_col15\" class=\"data row17 col15\" >-2.31</td>\n",
+ " <td id=\"T_cfd1f_row17_col16\" class=\"data row17 col16\" >-4.95</td>\n",
+ " <td id=\"T_cfd1f_row17_col17\" class=\"data row17 col17\" >-3.16</td>\n",
+ " <td id=\"T_cfd1f_row17_col18\" class=\"data row17 col18\" >-3.06</td>\n",
+ " <td id=\"T_cfd1f_row17_col19\" class=\"data row17 col19\" >-2.43</td>\n",
+ " <td id=\"T_cfd1f_row17_col20\" class=\"data row17 col20\" >0.84</td>\n",
+ " <td id=\"T_cfd1f_row17_col21\" class=\"data row17 col21\" >-12.57</td>\n",
+ " <td id=\"T_cfd1f_row17_col22\" class=\"data row17 col22\" >3.56</td>\n",
+ " <td id=\"T_cfd1f_row17_col23\" class=\"data row17 col23\" >7.36</td>\n",
+ " <td id=\"T_cfd1f_row17_col24\" class=\"data row17 col24\" >4.70</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row18\" class=\"row_heading level0 row18\" >18</th>\n",
+ " <td id=\"T_cfd1f_row18_col0\" class=\"data row18 col0\" >5.99</td>\n",
+ " <td id=\"T_cfd1f_row18_col1\" class=\"data row18 col1\" >5.82</td>\n",
+ " <td id=\"T_cfd1f_row18_col2\" class=\"data row18 col2\" >-2.85</td>\n",
+ " <td id=\"T_cfd1f_row18_col3\" class=\"data row18 col3\" >-4.15</td>\n",
+ " <td id=\"T_cfd1f_row18_col4\" class=\"data row18 col4\" >7.12</td>\n",
+ " <td id=\"T_cfd1f_row18_col5\" class=\"data row18 col5\" >-3.32</td>\n",
+ " <td id=\"T_cfd1f_row18_col6\" class=\"data row18 col6\" >-1.21</td>\n",
+ " <td id=\"T_cfd1f_row18_col7\" class=\"data row18 col7\" >7.93</td>\n",
+ " <td id=\"T_cfd1f_row18_col8\" class=\"data row18 col8\" >-4.85</td>\n",
+ " <td id=\"T_cfd1f_row18_col9\" class=\"data row18 col9\" >1.44</td>\n",
+ " <td id=\"T_cfd1f_row18_col10\" class=\"data row18 col10\" >-0.63</td>\n",
+ " <td id=\"T_cfd1f_row18_col11\" class=\"data row18 col11\" >0.35</td>\n",
+ " <td id=\"T_cfd1f_row18_col12\" class=\"data row18 col12\" >7.47</td>\n",
+ " <td id=\"T_cfd1f_row18_col13\" class=\"data row18 col13\" >0.87</td>\n",
+ " <td id=\"T_cfd1f_row18_col14\" class=\"data row18 col14\" >-1.52</td>\n",
+ " <td id=\"T_cfd1f_row18_col15\" class=\"data row18 col15\" >-2.09</td>\n",
+ " <td id=\"T_cfd1f_row18_col16\" class=\"data row18 col16\" >-4.23</td>\n",
+ " <td id=\"T_cfd1f_row18_col17\" class=\"data row18 col17\" >-2.55</td>\n",
+ " <td id=\"T_cfd1f_row18_col18\" class=\"data row18 col18\" >-2.46</td>\n",
+ " <td id=\"T_cfd1f_row18_col19\" class=\"data row18 col19\" >-2.89</td>\n",
+ " <td id=\"T_cfd1f_row18_col20\" class=\"data row18 col20\" >1.90</td>\n",
+ " <td id=\"T_cfd1f_row18_col21\" class=\"data row18 col21\" >-9.74</td>\n",
+ " <td id=\"T_cfd1f_row18_col22\" class=\"data row18 col22\" >3.43</td>\n",
+ " <td id=\"T_cfd1f_row18_col23\" class=\"data row18 col23\" >7.07</td>\n",
+ " <td id=\"T_cfd1f_row18_col24\" class=\"data row18 col24\" >4.39</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_cfd1f_level0_row19\" class=\"row_heading level0 row19\" >19</th>\n",
+ " <td id=\"T_cfd1f_row19_col0\" class=\"data row19 col0\" >4.03</td>\n",
+ " <td id=\"T_cfd1f_row19_col1\" class=\"data row19 col1\" >6.23</td>\n",
+ " <td id=\"T_cfd1f_row19_col2\" class=\"data row19 col2\" >-4.10</td>\n",
+ " <td id=\"T_cfd1f_row19_col3\" class=\"data row19 col3\" >-4.11</td>\n",
+ " <td id=\"T_cfd1f_row19_col4\" class=\"data row19 col4\" >7.19</td>\n",
+ " <td id=\"T_cfd1f_row19_col5\" class=\"data row19 col5\" >-4.10</td>\n",
+ " <td id=\"T_cfd1f_row19_col6\" class=\"data row19 col6\" >-1.52</td>\n",
+ " <td id=\"T_cfd1f_row19_col7\" class=\"data row19 col7\" >6.53</td>\n",
+ " <td id=\"T_cfd1f_row19_col8\" class=\"data row19 col8\" >-5.21</td>\n",
+ " <td id=\"T_cfd1f_row19_col9\" class=\"data row19 col9\" >-0.24</td>\n",
+ " <td id=\"T_cfd1f_row19_col10\" class=\"data row19 col10\" >0.01</td>\n",
+ " <td id=\"T_cfd1f_row19_col11\" class=\"data row19 col11\" >1.16</td>\n",
+ " <td id=\"T_cfd1f_row19_col12\" class=\"data row19 col12\" >6.43</td>\n",
+ " <td id=\"T_cfd1f_row19_col13\" class=\"data row19 col13\" >-1.97</td>\n",
+ " <td id=\"T_cfd1f_row19_col14\" class=\"data row19 col14\" >-2.64</td>\n",
+ " <td id=\"T_cfd1f_row19_col15\" class=\"data row19 col15\" >-1.66</td>\n",
+ " <td id=\"T_cfd1f_row19_col16\" class=\"data row19 col16\" >-5.20</td>\n",
+ " <td id=\"T_cfd1f_row19_col17\" class=\"data row19 col17\" >-3.25</td>\n",
+ " <td id=\"T_cfd1f_row19_col18\" class=\"data row19 col18\" >-2.87</td>\n",
+ " <td id=\"T_cfd1f_row19_col19\" class=\"data row19 col19\" >-1.65</td>\n",
+ " <td id=\"T_cfd1f_row19_col20\" class=\"data row19 col20\" >1.64</td>\n",
+ " <td id=\"T_cfd1f_row19_col21\" class=\"data row19 col21\" >-10.66</td>\n",
+ " <td id=\"T_cfd1f_row19_col22\" class=\"data row19 col22\" >2.83</td>\n",
+ " <td id=\"T_cfd1f_row19_col23\" class=\"data row19 col23\" >7.48</td>\n",
+ " <td id=\"T_cfd1f_row19_col24\" class=\"data row19 col24\" >3.94</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe93a470>"
+ ]
+ },
+ "execution_count": 64,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "np.random.seed(25)\n",
+    "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
+ "\n",
+ "bigdf.style.background_gradient(cmap, axis=1)\\\n",
+ " .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n",
+ " .set_caption(\"Hover to magnify\")\\\n",
+ " .format(precision=2)\\\n",
+ " .set_table_styles(magnify())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Sticky Headers\n",
+ "\n",
+ "If you display a large matrix or DataFrame in a notebook, but you want to always see the column and row headers you can use the [.set_sticky][sticky] method which manipulates the table styles CSS.\n",
+ "\n",
+ "[sticky]: ../reference/api/pandas.io.formats.style.Styler.set_sticky.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 65,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.108101Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.107873Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.134944Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.134402Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_637af thead tr th:nth-child(1) {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 0px;\n",
+ " z-index: 3 !important;\n",
+ "}\n",
+ "#T_637af tbody tr th:nth-child(1) {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 0px;\n",
+ " z-index: 1;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_637af\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_637af_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_637af_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_637af_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " <th id=\"T_637af_level0_col3\" class=\"col_heading level0 col3\" >3</th>\n",
+ " <th id=\"T_637af_level0_col4\" class=\"col_heading level0 col4\" >4</th>\n",
+ " <th id=\"T_637af_level0_col5\" class=\"col_heading level0 col5\" >5</th>\n",
+ " <th id=\"T_637af_level0_col6\" class=\"col_heading level0 col6\" >6</th>\n",
+ " <th id=\"T_637af_level0_col7\" class=\"col_heading level0 col7\" >7</th>\n",
+ " <th id=\"T_637af_level0_col8\" class=\"col_heading level0 col8\" >8</th>\n",
+ " <th id=\"T_637af_level0_col9\" class=\"col_heading level0 col9\" >9</th>\n",
+ " <th id=\"T_637af_level0_col10\" class=\"col_heading level0 col10\" >10</th>\n",
+ " <th id=\"T_637af_level0_col11\" class=\"col_heading level0 col11\" >11</th>\n",
+ " <th id=\"T_637af_level0_col12\" class=\"col_heading level0 col12\" >12</th>\n",
+ " <th id=\"T_637af_level0_col13\" class=\"col_heading level0 col13\" >13</th>\n",
+ " <th id=\"T_637af_level0_col14\" class=\"col_heading level0 col14\" >14</th>\n",
+ " <th id=\"T_637af_level0_col15\" class=\"col_heading level0 col15\" >15</th>\n",
+ " <th id=\"T_637af_level0_col16\" class=\"col_heading level0 col16\" >16</th>\n",
+ " <th id=\"T_637af_level0_col17\" class=\"col_heading level0 col17\" >17</th>\n",
+ " <th id=\"T_637af_level0_col18\" class=\"col_heading level0 col18\" >18</th>\n",
+ " <th id=\"T_637af_level0_col19\" class=\"col_heading level0 col19\" >19</th>\n",
+ " <th id=\"T_637af_level0_col20\" class=\"col_heading level0 col20\" >20</th>\n",
+ " <th id=\"T_637af_level0_col21\" class=\"col_heading level0 col21\" >21</th>\n",
+ " <th id=\"T_637af_level0_col22\" class=\"col_heading level0 col22\" >22</th>\n",
+ " <th id=\"T_637af_level0_col23\" class=\"col_heading level0 col23\" >23</th>\n",
+ " <th id=\"T_637af_level0_col24\" class=\"col_heading level0 col24\" >24</th>\n",
+ " <th id=\"T_637af_level0_col25\" class=\"col_heading level0 col25\" >25</th>\n",
+ " <th id=\"T_637af_level0_col26\" class=\"col_heading level0 col26\" >26</th>\n",
+ " <th id=\"T_637af_level0_col27\" class=\"col_heading level0 col27\" >27</th>\n",
+ " <th id=\"T_637af_level0_col28\" class=\"col_heading level0 col28\" >28</th>\n",
+ " <th id=\"T_637af_level0_col29\" class=\"col_heading level0 col29\" >29</th>\n",
+ " <th id=\"T_637af_level0_col30\" class=\"col_heading level0 col30\" >30</th>\n",
+ " <th id=\"T_637af_level0_col31\" class=\"col_heading level0 col31\" >31</th>\n",
+ " <th id=\"T_637af_level0_col32\" class=\"col_heading level0 col32\" >32</th>\n",
+ " <th id=\"T_637af_level0_col33\" class=\"col_heading level0 col33\" >33</th>\n",
+ " <th id=\"T_637af_level0_col34\" class=\"col_heading level0 col34\" >34</th>\n",
+ " <th id=\"T_637af_level0_col35\" class=\"col_heading level0 col35\" >35</th>\n",
+ " <th id=\"T_637af_level0_col36\" class=\"col_heading level0 col36\" >36</th>\n",
+ " <th id=\"T_637af_level0_col37\" class=\"col_heading level0 col37\" >37</th>\n",
+ " <th id=\"T_637af_level0_col38\" class=\"col_heading level0 col38\" >38</th>\n",
+ " <th id=\"T_637af_level0_col39\" class=\"col_heading level0 col39\" >39</th>\n",
+ " <th id=\"T_637af_level0_col40\" class=\"col_heading level0 col40\" >40</th>\n",
+ " <th id=\"T_637af_level0_col41\" class=\"col_heading level0 col41\" >41</th>\n",
+ " <th id=\"T_637af_level0_col42\" class=\"col_heading level0 col42\" >42</th>\n",
+ " <th id=\"T_637af_level0_col43\" class=\"col_heading level0 col43\" >43</th>\n",
+ " <th id=\"T_637af_level0_col44\" class=\"col_heading level0 col44\" >44</th>\n",
+ " <th id=\"T_637af_level0_col45\" class=\"col_heading level0 col45\" >45</th>\n",
+ " <th id=\"T_637af_level0_col46\" class=\"col_heading level0 col46\" >46</th>\n",
+ " <th id=\"T_637af_level0_col47\" class=\"col_heading level0 col47\" >47</th>\n",
+ " <th id=\"T_637af_level0_col48\" class=\"col_heading level0 col48\" >48</th>\n",
+ " <th id=\"T_637af_level0_col49\" class=\"col_heading level0 col49\" >49</th>\n",
+ " <th id=\"T_637af_level0_col50\" class=\"col_heading level0 col50\" >50</th>\n",
+ " <th id=\"T_637af_level0_col51\" class=\"col_heading level0 col51\" >51</th>\n",
+ " <th id=\"T_637af_level0_col52\" class=\"col_heading level0 col52\" >52</th>\n",
+ " <th id=\"T_637af_level0_col53\" class=\"col_heading level0 col53\" >53</th>\n",
+ " <th id=\"T_637af_level0_col54\" class=\"col_heading level0 col54\" >54</th>\n",
+ " <th id=\"T_637af_level0_col55\" class=\"col_heading level0 col55\" >55</th>\n",
+ " <th id=\"T_637af_level0_col56\" class=\"col_heading level0 col56\" >56</th>\n",
+ " <th id=\"T_637af_level0_col57\" class=\"col_heading level0 col57\" >57</th>\n",
+ " <th id=\"T_637af_level0_col58\" class=\"col_heading level0 col58\" >58</th>\n",
+ " <th id=\"T_637af_level0_col59\" class=\"col_heading level0 col59\" >59</th>\n",
+ " <th id=\"T_637af_level0_col60\" class=\"col_heading level0 col60\" >60</th>\n",
+ " <th id=\"T_637af_level0_col61\" class=\"col_heading level0 col61\" >61</th>\n",
+ " <th id=\"T_637af_level0_col62\" class=\"col_heading level0 col62\" >62</th>\n",
+ " <th id=\"T_637af_level0_col63\" class=\"col_heading level0 col63\" >63</th>\n",
+ " <th id=\"T_637af_level0_col64\" class=\"col_heading level0 col64\" >64</th>\n",
+ " <th id=\"T_637af_level0_col65\" class=\"col_heading level0 col65\" >65</th>\n",
+ " <th id=\"T_637af_level0_col66\" class=\"col_heading level0 col66\" >66</th>\n",
+ " <th id=\"T_637af_level0_col67\" class=\"col_heading level0 col67\" >67</th>\n",
+ " <th id=\"T_637af_level0_col68\" class=\"col_heading level0 col68\" >68</th>\n",
+ " <th id=\"T_637af_level0_col69\" class=\"col_heading level0 col69\" >69</th>\n",
+ " <th id=\"T_637af_level0_col70\" class=\"col_heading level0 col70\" >70</th>\n",
+ " <th id=\"T_637af_level0_col71\" class=\"col_heading level0 col71\" >71</th>\n",
+ " <th id=\"T_637af_level0_col72\" class=\"col_heading level0 col72\" >72</th>\n",
+ " <th id=\"T_637af_level0_col73\" class=\"col_heading level0 col73\" >73</th>\n",
+ " <th id=\"T_637af_level0_col74\" class=\"col_heading level0 col74\" >74</th>\n",
+ " <th id=\"T_637af_level0_col75\" class=\"col_heading level0 col75\" >75</th>\n",
+ " <th id=\"T_637af_level0_col76\" class=\"col_heading level0 col76\" >76</th>\n",
+ " <th id=\"T_637af_level0_col77\" class=\"col_heading level0 col77\" >77</th>\n",
+ " <th id=\"T_637af_level0_col78\" class=\"col_heading level0 col78\" >78</th>\n",
+ " <th id=\"T_637af_level0_col79\" class=\"col_heading level0 col79\" >79</th>\n",
+ " <th id=\"T_637af_level0_col80\" class=\"col_heading level0 col80\" >80</th>\n",
+ " <th id=\"T_637af_level0_col81\" class=\"col_heading level0 col81\" >81</th>\n",
+ " <th id=\"T_637af_level0_col82\" class=\"col_heading level0 col82\" >82</th>\n",
+ " <th id=\"T_637af_level0_col83\" class=\"col_heading level0 col83\" >83</th>\n",
+ " <th id=\"T_637af_level0_col84\" class=\"col_heading level0 col84\" >84</th>\n",
+ " <th id=\"T_637af_level0_col85\" class=\"col_heading level0 col85\" >85</th>\n",
+ " <th id=\"T_637af_level0_col86\" class=\"col_heading level0 col86\" >86</th>\n",
+ " <th id=\"T_637af_level0_col87\" class=\"col_heading level0 col87\" >87</th>\n",
+ " <th id=\"T_637af_level0_col88\" class=\"col_heading level0 col88\" >88</th>\n",
+ " <th id=\"T_637af_level0_col89\" class=\"col_heading level0 col89\" >89</th>\n",
+ " <th id=\"T_637af_level0_col90\" class=\"col_heading level0 col90\" >90</th>\n",
+ " <th id=\"T_637af_level0_col91\" class=\"col_heading level0 col91\" >91</th>\n",
+ " <th id=\"T_637af_level0_col92\" class=\"col_heading level0 col92\" >92</th>\n",
+ " <th id=\"T_637af_level0_col93\" class=\"col_heading level0 col93\" >93</th>\n",
+ " <th id=\"T_637af_level0_col94\" class=\"col_heading level0 col94\" >94</th>\n",
+ " <th id=\"T_637af_level0_col95\" class=\"col_heading level0 col95\" >95</th>\n",
+ " <th id=\"T_637af_level0_col96\" class=\"col_heading level0 col96\" >96</th>\n",
+ " <th id=\"T_637af_level0_col97\" class=\"col_heading level0 col97\" >97</th>\n",
+ " <th id=\"T_637af_level0_col98\" class=\"col_heading level0 col98\" >98</th>\n",
+ " <th id=\"T_637af_level0_col99\" class=\"col_heading level0 col99\" >99</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_637af_row0_col0\" class=\"data row0 col0\" >-0.773866</td>\n",
+ " <td id=\"T_637af_row0_col1\" class=\"data row0 col1\" >-0.240521</td>\n",
+ " <td id=\"T_637af_row0_col2\" class=\"data row0 col2\" >-0.217165</td>\n",
+ " <td id=\"T_637af_row0_col3\" class=\"data row0 col3\" >1.173609</td>\n",
+ " <td id=\"T_637af_row0_col4\" class=\"data row0 col4\" >0.686390</td>\n",
+ " <td id=\"T_637af_row0_col5\" class=\"data row0 col5\" >0.008358</td>\n",
+ " <td id=\"T_637af_row0_col6\" class=\"data row0 col6\" >0.696232</td>\n",
+ " <td id=\"T_637af_row0_col7\" class=\"data row0 col7\" >0.173166</td>\n",
+ " <td id=\"T_637af_row0_col8\" class=\"data row0 col8\" >0.620498</td>\n",
+ " <td id=\"T_637af_row0_col9\" class=\"data row0 col9\" >0.504067</td>\n",
+ " <td id=\"T_637af_row0_col10\" class=\"data row0 col10\" >0.428066</td>\n",
+ " <td id=\"T_637af_row0_col11\" class=\"data row0 col11\" >-0.051824</td>\n",
+ " <td id=\"T_637af_row0_col12\" class=\"data row0 col12\" >0.719915</td>\n",
+ " <td id=\"T_637af_row0_col13\" class=\"data row0 col13\" >0.057165</td>\n",
+ " <td id=\"T_637af_row0_col14\" class=\"data row0 col14\" >0.562808</td>\n",
+ " <td id=\"T_637af_row0_col15\" class=\"data row0 col15\" >-0.369536</td>\n",
+ " <td id=\"T_637af_row0_col16\" class=\"data row0 col16\" >0.483399</td>\n",
+ " <td id=\"T_637af_row0_col17\" class=\"data row0 col17\" >0.620765</td>\n",
+ " <td id=\"T_637af_row0_col18\" class=\"data row0 col18\" >-0.354342</td>\n",
+ " <td id=\"T_637af_row0_col19\" class=\"data row0 col19\" >-1.469471</td>\n",
+ " <td id=\"T_637af_row0_col20\" class=\"data row0 col20\" >-1.937266</td>\n",
+ " <td id=\"T_637af_row0_col21\" class=\"data row0 col21\" >0.038031</td>\n",
+ " <td id=\"T_637af_row0_col22\" class=\"data row0 col22\" >-1.518162</td>\n",
+ " <td id=\"T_637af_row0_col23\" class=\"data row0 col23\" >-0.417599</td>\n",
+ " <td id=\"T_637af_row0_col24\" class=\"data row0 col24\" >0.386717</td>\n",
+ " <td id=\"T_637af_row0_col25\" class=\"data row0 col25\" >0.716193</td>\n",
+ " <td id=\"T_637af_row0_col26\" class=\"data row0 col26\" >0.489961</td>\n",
+ " <td id=\"T_637af_row0_col27\" class=\"data row0 col27\" >0.733957</td>\n",
+ " <td id=\"T_637af_row0_col28\" class=\"data row0 col28\" >0.914415</td>\n",
+ " <td id=\"T_637af_row0_col29\" class=\"data row0 col29\" >0.679894</td>\n",
+ " <td id=\"T_637af_row0_col30\" class=\"data row0 col30\" >0.255448</td>\n",
+ " <td id=\"T_637af_row0_col31\" class=\"data row0 col31\" >-0.508338</td>\n",
+ " <td id=\"T_637af_row0_col32\" class=\"data row0 col32\" >0.332030</td>\n",
+ " <td id=\"T_637af_row0_col33\" class=\"data row0 col33\" >-0.111107</td>\n",
+ " <td id=\"T_637af_row0_col34\" class=\"data row0 col34\" >-0.251983</td>\n",
+ " <td id=\"T_637af_row0_col35\" class=\"data row0 col35\" >-1.456620</td>\n",
+ " <td id=\"T_637af_row0_col36\" class=\"data row0 col36\" >0.409630</td>\n",
+ " <td id=\"T_637af_row0_col37\" class=\"data row0 col37\" >1.062320</td>\n",
+ " <td id=\"T_637af_row0_col38\" class=\"data row0 col38\" >-0.577115</td>\n",
+ " <td id=\"T_637af_row0_col39\" class=\"data row0 col39\" >0.718796</td>\n",
+ " <td id=\"T_637af_row0_col40\" class=\"data row0 col40\" >-0.399260</td>\n",
+ " <td id=\"T_637af_row0_col41\" class=\"data row0 col41\" >-1.311389</td>\n",
+ " <td id=\"T_637af_row0_col42\" class=\"data row0 col42\" >0.649122</td>\n",
+ " <td id=\"T_637af_row0_col43\" class=\"data row0 col43\" >0.091566</td>\n",
+ " <td id=\"T_637af_row0_col44\" class=\"data row0 col44\" >0.628872</td>\n",
+ " <td id=\"T_637af_row0_col45\" class=\"data row0 col45\" >0.297894</td>\n",
+ " <td id=\"T_637af_row0_col46\" class=\"data row0 col46\" >-0.142290</td>\n",
+ " <td id=\"T_637af_row0_col47\" class=\"data row0 col47\" >-0.542291</td>\n",
+ " <td id=\"T_637af_row0_col48\" class=\"data row0 col48\" >-0.914290</td>\n",
+ " <td id=\"T_637af_row0_col49\" class=\"data row0 col49\" >1.144514</td>\n",
+ " <td id=\"T_637af_row0_col50\" class=\"data row0 col50\" >0.313584</td>\n",
+ " <td id=\"T_637af_row0_col51\" class=\"data row0 col51\" >1.182635</td>\n",
+ " <td id=\"T_637af_row0_col52\" class=\"data row0 col52\" >1.214235</td>\n",
+ " <td id=\"T_637af_row0_col53\" class=\"data row0 col53\" >-0.416446</td>\n",
+ " <td id=\"T_637af_row0_col54\" class=\"data row0 col54\" >-1.653940</td>\n",
+ " <td id=\"T_637af_row0_col55\" class=\"data row0 col55\" >-2.550787</td>\n",
+ " <td id=\"T_637af_row0_col56\" class=\"data row0 col56\" >0.442473</td>\n",
+ " <td id=\"T_637af_row0_col57\" class=\"data row0 col57\" >0.052127</td>\n",
+ " <td id=\"T_637af_row0_col58\" class=\"data row0 col58\" >-0.464469</td>\n",
+ " <td id=\"T_637af_row0_col59\" class=\"data row0 col59\" >-0.523852</td>\n",
+ " <td id=\"T_637af_row0_col60\" class=\"data row0 col60\" >0.989726</td>\n",
+ " <td id=\"T_637af_row0_col61\" class=\"data row0 col61\" >-1.325539</td>\n",
+ " <td id=\"T_637af_row0_col62\" class=\"data row0 col62\" >-0.199687</td>\n",
+ " <td id=\"T_637af_row0_col63\" class=\"data row0 col63\" >-1.226727</td>\n",
+ " <td id=\"T_637af_row0_col64\" class=\"data row0 col64\" >0.290018</td>\n",
+ " <td id=\"T_637af_row0_col65\" class=\"data row0 col65\" >1.164574</td>\n",
+ " <td id=\"T_637af_row0_col66\" class=\"data row0 col66\" >0.817841</td>\n",
+ " <td id=\"T_637af_row0_col67\" class=\"data row0 col67\" >-0.309509</td>\n",
+ " <td id=\"T_637af_row0_col68\" class=\"data row0 col68\" >0.496599</td>\n",
+ " <td id=\"T_637af_row0_col69\" class=\"data row0 col69\" >0.943536</td>\n",
+ " <td id=\"T_637af_row0_col70\" class=\"data row0 col70\" >-0.091850</td>\n",
+ " <td id=\"T_637af_row0_col71\" class=\"data row0 col71\" >-2.802658</td>\n",
+ " <td id=\"T_637af_row0_col72\" class=\"data row0 col72\" >2.126219</td>\n",
+ " <td id=\"T_637af_row0_col73\" class=\"data row0 col73\" >-0.521161</td>\n",
+ " <td id=\"T_637af_row0_col74\" class=\"data row0 col74\" >0.288098</td>\n",
+ " <td id=\"T_637af_row0_col75\" class=\"data row0 col75\" >-0.454663</td>\n",
+ " <td id=\"T_637af_row0_col76\" class=\"data row0 col76\" >-1.676143</td>\n",
+ " <td id=\"T_637af_row0_col77\" class=\"data row0 col77\" >-0.357661</td>\n",
+ " <td id=\"T_637af_row0_col78\" class=\"data row0 col78\" >-0.788960</td>\n",
+ " <td id=\"T_637af_row0_col79\" class=\"data row0 col79\" >0.185911</td>\n",
+ " <td id=\"T_637af_row0_col80\" class=\"data row0 col80\" >-0.017106</td>\n",
+ " <td id=\"T_637af_row0_col81\" class=\"data row0 col81\" >2.454020</td>\n",
+ " <td id=\"T_637af_row0_col82\" class=\"data row0 col82\" >1.832706</td>\n",
+ " <td id=\"T_637af_row0_col83\" class=\"data row0 col83\" >-0.911743</td>\n",
+ " <td id=\"T_637af_row0_col84\" class=\"data row0 col84\" >-0.655873</td>\n",
+ " <td id=\"T_637af_row0_col85\" class=\"data row0 col85\" >-0.000514</td>\n",
+ " <td id=\"T_637af_row0_col86\" class=\"data row0 col86\" >-2.226997</td>\n",
+ " <td id=\"T_637af_row0_col87\" class=\"data row0 col87\" >0.677285</td>\n",
+ " <td id=\"T_637af_row0_col88\" class=\"data row0 col88\" >-0.140249</td>\n",
+ " <td id=\"T_637af_row0_col89\" class=\"data row0 col89\" >-0.408407</td>\n",
+ " <td id=\"T_637af_row0_col90\" class=\"data row0 col90\" >-0.838665</td>\n",
+ " <td id=\"T_637af_row0_col91\" class=\"data row0 col91\" >0.482228</td>\n",
+ " <td id=\"T_637af_row0_col92\" class=\"data row0 col92\" >1.243458</td>\n",
+ " <td id=\"T_637af_row0_col93\" class=\"data row0 col93\" >-0.477394</td>\n",
+ " <td id=\"T_637af_row0_col94\" class=\"data row0 col94\" >-0.220343</td>\n",
+ " <td id=\"T_637af_row0_col95\" class=\"data row0 col95\" >-2.463966</td>\n",
+ " <td id=\"T_637af_row0_col96\" class=\"data row0 col96\" >0.237325</td>\n",
+ " <td id=\"T_637af_row0_col97\" class=\"data row0 col97\" >-0.307380</td>\n",
+ " <td id=\"T_637af_row0_col98\" class=\"data row0 col98\" >1.172478</td>\n",
+ " <td id=\"T_637af_row0_col99\" class=\"data row0 col99\" >0.819492</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n",
+ " <td id=\"T_637af_row1_col0\" class=\"data row1 col0\" >0.405906</td>\n",
+ " <td id=\"T_637af_row1_col1\" class=\"data row1 col1\" >-0.978919</td>\n",
+ " <td id=\"T_637af_row1_col2\" class=\"data row1 col2\" >1.267526</td>\n",
+ " <td id=\"T_637af_row1_col3\" class=\"data row1 col3\" >0.145250</td>\n",
+ " <td id=\"T_637af_row1_col4\" class=\"data row1 col4\" >-1.066786</td>\n",
+ " <td id=\"T_637af_row1_col5\" class=\"data row1 col5\" >-2.114192</td>\n",
+ " <td id=\"T_637af_row1_col6\" class=\"data row1 col6\" >-1.128346</td>\n",
+ " <td id=\"T_637af_row1_col7\" class=\"data row1 col7\" >-1.082523</td>\n",
+ " <td id=\"T_637af_row1_col8\" class=\"data row1 col8\" >0.372216</td>\n",
+ " <td id=\"T_637af_row1_col9\" class=\"data row1 col9\" >0.004127</td>\n",
+ " <td id=\"T_637af_row1_col10\" class=\"data row1 col10\" >-0.211984</td>\n",
+ " <td id=\"T_637af_row1_col11\" class=\"data row1 col11\" >0.937326</td>\n",
+ " <td id=\"T_637af_row1_col12\" class=\"data row1 col12\" >-0.935890</td>\n",
+ " <td id=\"T_637af_row1_col13\" class=\"data row1 col13\" >-1.704118</td>\n",
+ " <td id=\"T_637af_row1_col14\" class=\"data row1 col14\" >0.611789</td>\n",
+ " <td id=\"T_637af_row1_col15\" class=\"data row1 col15\" >-1.030015</td>\n",
+ " <td id=\"T_637af_row1_col16\" class=\"data row1 col16\" >0.636123</td>\n",
+ " <td id=\"T_637af_row1_col17\" class=\"data row1 col17\" >-1.506193</td>\n",
+ " <td id=\"T_637af_row1_col18\" class=\"data row1 col18\" >1.736609</td>\n",
+ " <td id=\"T_637af_row1_col19\" class=\"data row1 col19\" >1.392958</td>\n",
+ " <td id=\"T_637af_row1_col20\" class=\"data row1 col20\" >1.009424</td>\n",
+ " <td id=\"T_637af_row1_col21\" class=\"data row1 col21\" >0.353266</td>\n",
+ " <td id=\"T_637af_row1_col22\" class=\"data row1 col22\" >0.697339</td>\n",
+ " <td id=\"T_637af_row1_col23\" class=\"data row1 col23\" >-0.297424</td>\n",
+ " <td id=\"T_637af_row1_col24\" class=\"data row1 col24\" >0.428702</td>\n",
+ " <td id=\"T_637af_row1_col25\" class=\"data row1 col25\" >-0.145346</td>\n",
+ " <td id=\"T_637af_row1_col26\" class=\"data row1 col26\" >-0.333553</td>\n",
+ " <td id=\"T_637af_row1_col27\" class=\"data row1 col27\" >-0.974699</td>\n",
+ " <td id=\"T_637af_row1_col28\" class=\"data row1 col28\" >0.665314</td>\n",
+ " <td id=\"T_637af_row1_col29\" class=\"data row1 col29\" >0.971944</td>\n",
+ " <td id=\"T_637af_row1_col30\" class=\"data row1 col30\" >0.121950</td>\n",
+ " <td id=\"T_637af_row1_col31\" class=\"data row1 col31\" >-1.439668</td>\n",
+ " <td id=\"T_637af_row1_col32\" class=\"data row1 col32\" >1.018808</td>\n",
+ " <td id=\"T_637af_row1_col33\" class=\"data row1 col33\" >1.442399</td>\n",
+ " <td id=\"T_637af_row1_col34\" class=\"data row1 col34\" >-0.199585</td>\n",
+ " <td id=\"T_637af_row1_col35\" class=\"data row1 col35\" >-1.165916</td>\n",
+ " <td id=\"T_637af_row1_col36\" class=\"data row1 col36\" >0.645656</td>\n",
+ " <td id=\"T_637af_row1_col37\" class=\"data row1 col37\" >1.436466</td>\n",
+ " <td id=\"T_637af_row1_col38\" class=\"data row1 col38\" >-0.921215</td>\n",
+ " <td id=\"T_637af_row1_col39\" class=\"data row1 col39\" >1.293906</td>\n",
+ " <td id=\"T_637af_row1_col40\" class=\"data row1 col40\" >-2.706443</td>\n",
+ " <td id=\"T_637af_row1_col41\" class=\"data row1 col41\" >1.460928</td>\n",
+ " <td id=\"T_637af_row1_col42\" class=\"data row1 col42\" >-0.823197</td>\n",
+ " <td id=\"T_637af_row1_col43\" class=\"data row1 col43\" >0.292952</td>\n",
+ " <td id=\"T_637af_row1_col44\" class=\"data row1 col44\" >-1.448992</td>\n",
+ " <td id=\"T_637af_row1_col45\" class=\"data row1 col45\" >0.026692</td>\n",
+ " <td id=\"T_637af_row1_col46\" class=\"data row1 col46\" >-0.975883</td>\n",
+ " <td id=\"T_637af_row1_col47\" class=\"data row1 col47\" >0.392823</td>\n",
+ " <td id=\"T_637af_row1_col48\" class=\"data row1 col48\" >0.442166</td>\n",
+ " <td id=\"T_637af_row1_col49\" class=\"data row1 col49\" >0.745741</td>\n",
+ " <td id=\"T_637af_row1_col50\" class=\"data row1 col50\" >1.187982</td>\n",
+ " <td id=\"T_637af_row1_col51\" class=\"data row1 col51\" >-0.218570</td>\n",
+ " <td id=\"T_637af_row1_col52\" class=\"data row1 col52\" >0.305288</td>\n",
+ " <td id=\"T_637af_row1_col53\" class=\"data row1 col53\" >0.054932</td>\n",
+ " <td id=\"T_637af_row1_col54\" class=\"data row1 col54\" >-1.476953</td>\n",
+ " <td id=\"T_637af_row1_col55\" class=\"data row1 col55\" >-0.114434</td>\n",
+ " <td id=\"T_637af_row1_col56\" class=\"data row1 col56\" >0.014103</td>\n",
+ " <td id=\"T_637af_row1_col57\" class=\"data row1 col57\" >0.825394</td>\n",
+ " <td id=\"T_637af_row1_col58\" class=\"data row1 col58\" >-0.060654</td>\n",
+ " <td id=\"T_637af_row1_col59\" class=\"data row1 col59\" >-0.413688</td>\n",
+ " <td id=\"T_637af_row1_col60\" class=\"data row1 col60\" >0.974836</td>\n",
+ " <td id=\"T_637af_row1_col61\" class=\"data row1 col61\" >1.339210</td>\n",
+ " <td id=\"T_637af_row1_col62\" class=\"data row1 col62\" >1.034838</td>\n",
+ " <td id=\"T_637af_row1_col63\" class=\"data row1 col63\" >0.040775</td>\n",
+ " <td id=\"T_637af_row1_col64\" class=\"data row1 col64\" >0.705001</td>\n",
+ " <td id=\"T_637af_row1_col65\" class=\"data row1 col65\" >0.017796</td>\n",
+ " <td id=\"T_637af_row1_col66\" class=\"data row1 col66\" >1.867681</td>\n",
+ " <td id=\"T_637af_row1_col67\" class=\"data row1 col67\" >-0.390173</td>\n",
+ " <td id=\"T_637af_row1_col68\" class=\"data row1 col68\" >2.285277</td>\n",
+ " <td id=\"T_637af_row1_col69\" class=\"data row1 col69\" >2.311464</td>\n",
+ " <td id=\"T_637af_row1_col70\" class=\"data row1 col70\" >-0.085070</td>\n",
+ " <td id=\"T_637af_row1_col71\" class=\"data row1 col71\" >-0.648115</td>\n",
+ " <td id=\"T_637af_row1_col72\" class=\"data row1 col72\" >0.576300</td>\n",
+ " <td id=\"T_637af_row1_col73\" class=\"data row1 col73\" >-0.790087</td>\n",
+ " <td id=\"T_637af_row1_col74\" class=\"data row1 col74\" >-1.183798</td>\n",
+ " <td id=\"T_637af_row1_col75\" class=\"data row1 col75\" >-1.334558</td>\n",
+ " <td id=\"T_637af_row1_col76\" class=\"data row1 col76\" >-0.454118</td>\n",
+ " <td id=\"T_637af_row1_col77\" class=\"data row1 col77\" >0.319302</td>\n",
+ " <td id=\"T_637af_row1_col78\" class=\"data row1 col78\" >1.706488</td>\n",
+ " <td id=\"T_637af_row1_col79\" class=\"data row1 col79\" >0.830429</td>\n",
+ " <td id=\"T_637af_row1_col80\" class=\"data row1 col80\" >0.502476</td>\n",
+ " <td id=\"T_637af_row1_col81\" class=\"data row1 col81\" >-0.079631</td>\n",
+ " <td id=\"T_637af_row1_col82\" class=\"data row1 col82\" >0.414635</td>\n",
+ " <td id=\"T_637af_row1_col83\" class=\"data row1 col83\" >0.332511</td>\n",
+ " <td id=\"T_637af_row1_col84\" class=\"data row1 col84\" >0.042935</td>\n",
+ " <td id=\"T_637af_row1_col85\" class=\"data row1 col85\" >-0.160910</td>\n",
+ " <td id=\"T_637af_row1_col86\" class=\"data row1 col86\" >0.918553</td>\n",
+ " <td id=\"T_637af_row1_col87\" class=\"data row1 col87\" >-0.292697</td>\n",
+ " <td id=\"T_637af_row1_col88\" class=\"data row1 col88\" >-1.303834</td>\n",
+ " <td id=\"T_637af_row1_col89\" class=\"data row1 col89\" >-0.199604</td>\n",
+ " <td id=\"T_637af_row1_col90\" class=\"data row1 col90\" >0.871023</td>\n",
+ " <td id=\"T_637af_row1_col91\" class=\"data row1 col91\" >-1.370681</td>\n",
+ " <td id=\"T_637af_row1_col92\" class=\"data row1 col92\" >-0.205701</td>\n",
+ " <td id=\"T_637af_row1_col93\" class=\"data row1 col93\" >-0.492973</td>\n",
+ " <td id=\"T_637af_row1_col94\" class=\"data row1 col94\" >1.123083</td>\n",
+ " <td id=\"T_637af_row1_col95\" class=\"data row1 col95\" >-0.081842</td>\n",
+ " <td id=\"T_637af_row1_col96\" class=\"data row1 col96\" >-0.118527</td>\n",
+ " <td id=\"T_637af_row1_col97\" class=\"data row1 col97\" >0.245838</td>\n",
+ " <td id=\"T_637af_row1_col98\" class=\"data row1 col98\" >-0.315742</td>\n",
+ " <td id=\"T_637af_row1_col99\" class=\"data row1 col99\" >-0.511806</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n",
+ " <td id=\"T_637af_row2_col0\" class=\"data row2 col0\" >0.011470</td>\n",
+ " <td id=\"T_637af_row2_col1\" class=\"data row2 col1\" >-0.036104</td>\n",
+ " <td id=\"T_637af_row2_col2\" class=\"data row2 col2\" >1.399603</td>\n",
+ " <td id=\"T_637af_row2_col3\" class=\"data row2 col3\" >-0.418176</td>\n",
+ " <td id=\"T_637af_row2_col4\" class=\"data row2 col4\" >-0.412229</td>\n",
+ " <td id=\"T_637af_row2_col5\" class=\"data row2 col5\" >-1.234783</td>\n",
+ " <td id=\"T_637af_row2_col6\" class=\"data row2 col6\" >-1.121500</td>\n",
+ " <td id=\"T_637af_row2_col7\" class=\"data row2 col7\" >1.196478</td>\n",
+ " <td id=\"T_637af_row2_col8\" class=\"data row2 col8\" >-0.569522</td>\n",
+ " <td id=\"T_637af_row2_col9\" class=\"data row2 col9\" >0.422022</td>\n",
+ " <td id=\"T_637af_row2_col10\" class=\"data row2 col10\" >-0.220484</td>\n",
+ " <td id=\"T_637af_row2_col11\" class=\"data row2 col11\" >0.804338</td>\n",
+ " <td id=\"T_637af_row2_col12\" class=\"data row2 col12\" >2.892667</td>\n",
+ " <td id=\"T_637af_row2_col13\" class=\"data row2 col13\" >-0.511055</td>\n",
+ " <td id=\"T_637af_row2_col14\" class=\"data row2 col14\" >-0.168722</td>\n",
+ " <td id=\"T_637af_row2_col15\" class=\"data row2 col15\" >-1.477996</td>\n",
+ " <td id=\"T_637af_row2_col16\" class=\"data row2 col16\" >-1.969917</td>\n",
+ " <td id=\"T_637af_row2_col17\" class=\"data row2 col17\" >0.471354</td>\n",
+ " <td id=\"T_637af_row2_col18\" class=\"data row2 col18\" >1.698548</td>\n",
+ " <td id=\"T_637af_row2_col19\" class=\"data row2 col19\" >0.137105</td>\n",
+ " <td id=\"T_637af_row2_col20\" class=\"data row2 col20\" >-0.762052</td>\n",
+ " <td id=\"T_637af_row2_col21\" class=\"data row2 col21\" >0.199379</td>\n",
+ " <td id=\"T_637af_row2_col22\" class=\"data row2 col22\" >-0.964346</td>\n",
+ " <td id=\"T_637af_row2_col23\" class=\"data row2 col23\" >-0.256692</td>\n",
+ " <td id=\"T_637af_row2_col24\" class=\"data row2 col24\" >1.265275</td>\n",
+ " <td id=\"T_637af_row2_col25\" class=\"data row2 col25\" >0.848762</td>\n",
+ " <td id=\"T_637af_row2_col26\" class=\"data row2 col26\" >-0.784161</td>\n",
+ " <td id=\"T_637af_row2_col27\" class=\"data row2 col27\" >1.863776</td>\n",
+ " <td id=\"T_637af_row2_col28\" class=\"data row2 col28\" >-0.355569</td>\n",
+ " <td id=\"T_637af_row2_col29\" class=\"data row2 col29\" >0.854552</td>\n",
+ " <td id=\"T_637af_row2_col30\" class=\"data row2 col30\" >0.768061</td>\n",
+ " <td id=\"T_637af_row2_col31\" class=\"data row2 col31\" >-2.075718</td>\n",
+ " <td id=\"T_637af_row2_col32\" class=\"data row2 col32\" >-2.501069</td>\n",
+ " <td id=\"T_637af_row2_col33\" class=\"data row2 col33\" >1.109868</td>\n",
+ " <td id=\"T_637af_row2_col34\" class=\"data row2 col34\" >0.957545</td>\n",
+ " <td id=\"T_637af_row2_col35\" class=\"data row2 col35\" >-0.683276</td>\n",
+ " <td id=\"T_637af_row2_col36\" class=\"data row2 col36\" >0.307764</td>\n",
+ " <td id=\"T_637af_row2_col37\" class=\"data row2 col37\" >0.733073</td>\n",
+ " <td id=\"T_637af_row2_col38\" class=\"data row2 col38\" >1.706250</td>\n",
+ " <td id=\"T_637af_row2_col39\" class=\"data row2 col39\" >-1.118091</td>\n",
+ " <td id=\"T_637af_row2_col40\" class=\"data row2 col40\" >0.374961</td>\n",
+ " <td id=\"T_637af_row2_col41\" class=\"data row2 col41\" >-1.414503</td>\n",
+ " <td id=\"T_637af_row2_col42\" class=\"data row2 col42\" >-0.524183</td>\n",
+ " <td id=\"T_637af_row2_col43\" class=\"data row2 col43\" >-1.662696</td>\n",
+ " <td id=\"T_637af_row2_col44\" class=\"data row2 col44\" >0.687921</td>\n",
+ " <td id=\"T_637af_row2_col45\" class=\"data row2 col45\" >0.521732</td>\n",
+ " <td id=\"T_637af_row2_col46\" class=\"data row2 col46\" >1.451396</td>\n",
+ " <td id=\"T_637af_row2_col47\" class=\"data row2 col47\" >-0.833491</td>\n",
+ " <td id=\"T_637af_row2_col48\" class=\"data row2 col48\" >-0.362796</td>\n",
+ " <td id=\"T_637af_row2_col49\" class=\"data row2 col49\" >-1.174444</td>\n",
+ " <td id=\"T_637af_row2_col50\" class=\"data row2 col50\" >-0.813893</td>\n",
+ " <td id=\"T_637af_row2_col51\" class=\"data row2 col51\" >-0.893220</td>\n",
+ " <td id=\"T_637af_row2_col52\" class=\"data row2 col52\" >0.770743</td>\n",
+ " <td id=\"T_637af_row2_col53\" class=\"data row2 col53\" >1.156647</td>\n",
+ " <td id=\"T_637af_row2_col54\" class=\"data row2 col54\" >-0.647444</td>\n",
+ " <td id=\"T_637af_row2_col55\" class=\"data row2 col55\" >0.125929</td>\n",
+ " <td id=\"T_637af_row2_col56\" class=\"data row2 col56\" >0.513600</td>\n",
+ " <td id=\"T_637af_row2_col57\" class=\"data row2 col57\" >-0.537874</td>\n",
+ " <td id=\"T_637af_row2_col58\" class=\"data row2 col58\" >1.992052</td>\n",
+ " <td id=\"T_637af_row2_col59\" class=\"data row2 col59\" >-1.946584</td>\n",
+ " <td id=\"T_637af_row2_col60\" class=\"data row2 col60\" >-0.104759</td>\n",
+ " <td id=\"T_637af_row2_col61\" class=\"data row2 col61\" >0.484779</td>\n",
+ " <td id=\"T_637af_row2_col62\" class=\"data row2 col62\" >-0.290936</td>\n",
+ " <td id=\"T_637af_row2_col63\" class=\"data row2 col63\" >-0.441075</td>\n",
+ " <td id=\"T_637af_row2_col64\" class=\"data row2 col64\" >0.542993</td>\n",
+ " <td id=\"T_637af_row2_col65\" class=\"data row2 col65\" >-1.050038</td>\n",
+ " <td id=\"T_637af_row2_col66\" class=\"data row2 col66\" >1.630482</td>\n",
+ " <td id=\"T_637af_row2_col67\" class=\"data row2 col67\" >0.239771</td>\n",
+ " <td id=\"T_637af_row2_col68\" class=\"data row2 col68\" >-1.177310</td>\n",
+ " <td id=\"T_637af_row2_col69\" class=\"data row2 col69\" >0.464804</td>\n",
+ " <td id=\"T_637af_row2_col70\" class=\"data row2 col70\" >-0.966995</td>\n",
+ " <td id=\"T_637af_row2_col71\" class=\"data row2 col71\" >0.646086</td>\n",
+ " <td id=\"T_637af_row2_col72\" class=\"data row2 col72\" >0.486899</td>\n",
+ " <td id=\"T_637af_row2_col73\" class=\"data row2 col73\" >1.022196</td>\n",
+ " <td id=\"T_637af_row2_col74\" class=\"data row2 col74\" >-2.267827</td>\n",
+ " <td id=\"T_637af_row2_col75\" class=\"data row2 col75\" >-1.229616</td>\n",
+ " <td id=\"T_637af_row2_col76\" class=\"data row2 col76\" >1.313805</td>\n",
+ " <td id=\"T_637af_row2_col77\" class=\"data row2 col77\" >1.073292</td>\n",
+ " <td id=\"T_637af_row2_col78\" class=\"data row2 col78\" >2.324940</td>\n",
+ " <td id=\"T_637af_row2_col79\" class=\"data row2 col79\" >-0.542720</td>\n",
+ " <td id=\"T_637af_row2_col80\" class=\"data row2 col80\" >-1.504292</td>\n",
+ " <td id=\"T_637af_row2_col81\" class=\"data row2 col81\" >0.777643</td>\n",
+ " <td id=\"T_637af_row2_col82\" class=\"data row2 col82\" >-0.618553</td>\n",
+ " <td id=\"T_637af_row2_col83\" class=\"data row2 col83\" >0.011342</td>\n",
+ " <td id=\"T_637af_row2_col84\" class=\"data row2 col84\" >1.385062</td>\n",
+ " <td id=\"T_637af_row2_col85\" class=\"data row2 col85\" >1.363552</td>\n",
+ " <td id=\"T_637af_row2_col86\" class=\"data row2 col86\" >-0.549834</td>\n",
+ " <td id=\"T_637af_row2_col87\" class=\"data row2 col87\" >0.688896</td>\n",
+ " <td id=\"T_637af_row2_col88\" class=\"data row2 col88\" >1.361288</td>\n",
+ " <td id=\"T_637af_row2_col89\" class=\"data row2 col89\" >-0.381137</td>\n",
+ " <td id=\"T_637af_row2_col90\" class=\"data row2 col90\" >0.797812</td>\n",
+ " <td id=\"T_637af_row2_col91\" class=\"data row2 col91\" >-1.128198</td>\n",
+ " <td id=\"T_637af_row2_col92\" class=\"data row2 col92\" >0.369208</td>\n",
+ " <td id=\"T_637af_row2_col93\" class=\"data row2 col93\" >0.540132</td>\n",
+ " <td id=\"T_637af_row2_col94\" class=\"data row2 col94\" >0.413853</td>\n",
+ " <td id=\"T_637af_row2_col95\" class=\"data row2 col95\" >-0.200308</td>\n",
+ " <td id=\"T_637af_row2_col96\" class=\"data row2 col96\" >-0.969126</td>\n",
+ " <td id=\"T_637af_row2_col97\" class=\"data row2 col97\" >0.981293</td>\n",
+ " <td id=\"T_637af_row2_col98\" class=\"data row2 col98\" >-0.009783</td>\n",
+ " <td id=\"T_637af_row2_col99\" class=\"data row2 col99\" >-0.320020</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row3\" class=\"row_heading level0 row3\" >3</th>\n",
+ " <td id=\"T_637af_row3_col0\" class=\"data row3 col0\" >-0.574816</td>\n",
+ " <td id=\"T_637af_row3_col1\" class=\"data row3 col1\" >1.419977</td>\n",
+ " <td id=\"T_637af_row3_col2\" class=\"data row3 col2\" >0.434813</td>\n",
+ " <td id=\"T_637af_row3_col3\" class=\"data row3 col3\" >-1.101217</td>\n",
+ " <td id=\"T_637af_row3_col4\" class=\"data row3 col4\" >-1.586275</td>\n",
+ " <td id=\"T_637af_row3_col5\" class=\"data row3 col5\" >1.979573</td>\n",
+ " <td id=\"T_637af_row3_col6\" class=\"data row3 col6\" >0.378298</td>\n",
+ " <td id=\"T_637af_row3_col7\" class=\"data row3 col7\" >0.782326</td>\n",
+ " <td id=\"T_637af_row3_col8\" class=\"data row3 col8\" >2.178987</td>\n",
+ " <td id=\"T_637af_row3_col9\" class=\"data row3 col9\" >0.657564</td>\n",
+ " <td id=\"T_637af_row3_col10\" class=\"data row3 col10\" >0.683774</td>\n",
+ " <td id=\"T_637af_row3_col11\" class=\"data row3 col11\" >-0.091000</td>\n",
+ " <td id=\"T_637af_row3_col12\" class=\"data row3 col12\" >-0.059552</td>\n",
+ " <td id=\"T_637af_row3_col13\" class=\"data row3 col13\" >-0.738908</td>\n",
+ " <td id=\"T_637af_row3_col14\" class=\"data row3 col14\" >-0.907653</td>\n",
+ " <td id=\"T_637af_row3_col15\" class=\"data row3 col15\" >-0.701936</td>\n",
+ " <td id=\"T_637af_row3_col16\" class=\"data row3 col16\" >0.580039</td>\n",
+ " <td id=\"T_637af_row3_col17\" class=\"data row3 col17\" >-0.618757</td>\n",
+ " <td id=\"T_637af_row3_col18\" class=\"data row3 col18\" >0.453684</td>\n",
+ " <td id=\"T_637af_row3_col19\" class=\"data row3 col19\" >1.665382</td>\n",
+ " <td id=\"T_637af_row3_col20\" class=\"data row3 col20\" >-0.152321</td>\n",
+ " <td id=\"T_637af_row3_col21\" class=\"data row3 col21\" >0.880077</td>\n",
+ " <td id=\"T_637af_row3_col22\" class=\"data row3 col22\" >0.571073</td>\n",
+ " <td id=\"T_637af_row3_col23\" class=\"data row3 col23\" >-0.604736</td>\n",
+ " <td id=\"T_637af_row3_col24\" class=\"data row3 col24\" >0.532359</td>\n",
+ " <td id=\"T_637af_row3_col25\" class=\"data row3 col25\" >0.515031</td>\n",
+ " <td id=\"T_637af_row3_col26\" class=\"data row3 col26\" >-0.959844</td>\n",
+ " <td id=\"T_637af_row3_col27\" class=\"data row3 col27\" >-0.887184</td>\n",
+ " <td id=\"T_637af_row3_col28\" class=\"data row3 col28\" >0.435781</td>\n",
+ " <td id=\"T_637af_row3_col29\" class=\"data row3 col29\" >0.862093</td>\n",
+ " <td id=\"T_637af_row3_col30\" class=\"data row3 col30\" >-0.956321</td>\n",
+ " <td id=\"T_637af_row3_col31\" class=\"data row3 col31\" >-0.625909</td>\n",
+ " <td id=\"T_637af_row3_col32\" class=\"data row3 col32\" >0.194472</td>\n",
+ " <td id=\"T_637af_row3_col33\" class=\"data row3 col33\" >0.442490</td>\n",
+ " <td id=\"T_637af_row3_col34\" class=\"data row3 col34\" >0.526503</td>\n",
+ " <td id=\"T_637af_row3_col35\" class=\"data row3 col35\" >-0.215274</td>\n",
+ " <td id=\"T_637af_row3_col36\" class=\"data row3 col36\" >0.090711</td>\n",
+ " <td id=\"T_637af_row3_col37\" class=\"data row3 col37\" >0.932592</td>\n",
+ " <td id=\"T_637af_row3_col38\" class=\"data row3 col38\" >0.811999</td>\n",
+ " <td id=\"T_637af_row3_col39\" class=\"data row3 col39\" >-2.497026</td>\n",
+ " <td id=\"T_637af_row3_col40\" class=\"data row3 col40\" >0.631545</td>\n",
+ " <td id=\"T_637af_row3_col41\" class=\"data row3 col41\" >0.321418</td>\n",
+ " <td id=\"T_637af_row3_col42\" class=\"data row3 col42\" >-0.425549</td>\n",
+ " <td id=\"T_637af_row3_col43\" class=\"data row3 col43\" >-1.078832</td>\n",
+ " <td id=\"T_637af_row3_col44\" class=\"data row3 col44\" >0.753444</td>\n",
+ " <td id=\"T_637af_row3_col45\" class=\"data row3 col45\" >0.199790</td>\n",
+ " <td id=\"T_637af_row3_col46\" class=\"data row3 col46\" >-0.360526</td>\n",
+ " <td id=\"T_637af_row3_col47\" class=\"data row3 col47\" >-0.013448</td>\n",
+ " <td id=\"T_637af_row3_col48\" class=\"data row3 col48\" >-0.819476</td>\n",
+ " <td id=\"T_637af_row3_col49\" class=\"data row3 col49\" >0.814869</td>\n",
+ " <td id=\"T_637af_row3_col50\" class=\"data row3 col50\" >0.442118</td>\n",
+ " <td id=\"T_637af_row3_col51\" class=\"data row3 col51\" >-0.972048</td>\n",
+ " <td id=\"T_637af_row3_col52\" class=\"data row3 col52\" >-0.060603</td>\n",
+ " <td id=\"T_637af_row3_col53\" class=\"data row3 col53\" >-2.349825</td>\n",
+ " <td id=\"T_637af_row3_col54\" class=\"data row3 col54\" >1.265445</td>\n",
+ " <td id=\"T_637af_row3_col55\" class=\"data row3 col55\" >-0.573257</td>\n",
+ " <td id=\"T_637af_row3_col56\" class=\"data row3 col56\" >0.429124</td>\n",
+ " <td id=\"T_637af_row3_col57\" class=\"data row3 col57\" >1.049783</td>\n",
+ " <td id=\"T_637af_row3_col58\" class=\"data row3 col58\" >1.954773</td>\n",
+ " <td id=\"T_637af_row3_col59\" class=\"data row3 col59\" >0.071883</td>\n",
+ " <td id=\"T_637af_row3_col60\" class=\"data row3 col60\" >-0.094209</td>\n",
+ " <td id=\"T_637af_row3_col61\" class=\"data row3 col61\" >0.265616</td>\n",
+ " <td id=\"T_637af_row3_col62\" class=\"data row3 col62\" >0.948318</td>\n",
+ " <td id=\"T_637af_row3_col63\" class=\"data row3 col63\" >0.331645</td>\n",
+ " <td id=\"T_637af_row3_col64\" class=\"data row3 col64\" >1.343401</td>\n",
+ " <td id=\"T_637af_row3_col65\" class=\"data row3 col65\" >-0.167934</td>\n",
+ " <td id=\"T_637af_row3_col66\" class=\"data row3 col66\" >-1.105252</td>\n",
+ " <td id=\"T_637af_row3_col67\" class=\"data row3 col67\" >-0.167077</td>\n",
+ " <td id=\"T_637af_row3_col68\" class=\"data row3 col68\" >-0.096576</td>\n",
+ " <td id=\"T_637af_row3_col69\" class=\"data row3 col69\" >-0.838161</td>\n",
+ " <td id=\"T_637af_row3_col70\" class=\"data row3 col70\" >-0.208564</td>\n",
+ " <td id=\"T_637af_row3_col71\" class=\"data row3 col71\" >0.394534</td>\n",
+ " <td id=\"T_637af_row3_col72\" class=\"data row3 col72\" >0.762533</td>\n",
+ " <td id=\"T_637af_row3_col73\" class=\"data row3 col73\" >1.235357</td>\n",
+ " <td id=\"T_637af_row3_col74\" class=\"data row3 col74\" >-0.207282</td>\n",
+ " <td id=\"T_637af_row3_col75\" class=\"data row3 col75\" >-0.202946</td>\n",
+ " <td id=\"T_637af_row3_col76\" class=\"data row3 col76\" >-0.468025</td>\n",
+ " <td id=\"T_637af_row3_col77\" class=\"data row3 col77\" >0.256944</td>\n",
+ " <td id=\"T_637af_row3_col78\" class=\"data row3 col78\" >2.587584</td>\n",
+ " <td id=\"T_637af_row3_col79\" class=\"data row3 col79\" >1.186697</td>\n",
+ " <td id=\"T_637af_row3_col80\" class=\"data row3 col80\" >-1.031903</td>\n",
+ " <td id=\"T_637af_row3_col81\" class=\"data row3 col81\" >1.428316</td>\n",
+ " <td id=\"T_637af_row3_col82\" class=\"data row3 col82\" >0.658899</td>\n",
+ " <td id=\"T_637af_row3_col83\" class=\"data row3 col83\" >-0.046582</td>\n",
+ " <td id=\"T_637af_row3_col84\" class=\"data row3 col84\" >-0.075422</td>\n",
+ " <td id=\"T_637af_row3_col85\" class=\"data row3 col85\" >1.329359</td>\n",
+ " <td id=\"T_637af_row3_col86\" class=\"data row3 col86\" >-0.684267</td>\n",
+ " <td id=\"T_637af_row3_col87\" class=\"data row3 col87\" >-1.524182</td>\n",
+ " <td id=\"T_637af_row3_col88\" class=\"data row3 col88\" >2.014061</td>\n",
+ " <td id=\"T_637af_row3_col89\" class=\"data row3 col89\" >3.770933</td>\n",
+ " <td id=\"T_637af_row3_col90\" class=\"data row3 col90\" >0.647353</td>\n",
+ " <td id=\"T_637af_row3_col91\" class=\"data row3 col91\" >-1.021377</td>\n",
+ " <td id=\"T_637af_row3_col92\" class=\"data row3 col92\" >-0.345493</td>\n",
+ " <td id=\"T_637af_row3_col93\" class=\"data row3 col93\" >0.582811</td>\n",
+ " <td id=\"T_637af_row3_col94\" class=\"data row3 col94\" >0.797812</td>\n",
+ " <td id=\"T_637af_row3_col95\" class=\"data row3 col95\" >1.326020</td>\n",
+ " <td id=\"T_637af_row3_col96\" class=\"data row3 col96\" >1.422857</td>\n",
+ " <td id=\"T_637af_row3_col97\" class=\"data row3 col97\" >-3.077007</td>\n",
+ " <td id=\"T_637af_row3_col98\" class=\"data row3 col98\" >0.184083</td>\n",
+ " <td id=\"T_637af_row3_col99\" class=\"data row3 col99\" >1.478935</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n",
+ " <td id=\"T_637af_row4_col0\" class=\"data row4 col0\" >-0.600142</td>\n",
+ " <td id=\"T_637af_row4_col1\" class=\"data row4 col1\" >1.929561</td>\n",
+ " <td id=\"T_637af_row4_col2\" class=\"data row4 col2\" >-2.346771</td>\n",
+ " <td id=\"T_637af_row4_col3\" class=\"data row4 col3\" >-0.669700</td>\n",
+ " <td id=\"T_637af_row4_col4\" class=\"data row4 col4\" >-1.165258</td>\n",
+ " <td id=\"T_637af_row4_col5\" class=\"data row4 col5\" >0.814788</td>\n",
+ " <td id=\"T_637af_row4_col6\" class=\"data row4 col6\" >0.444449</td>\n",
+ " <td id=\"T_637af_row4_col7\" class=\"data row4 col7\" >-0.576758</td>\n",
+ " <td id=\"T_637af_row4_col8\" class=\"data row4 col8\" >0.353091</td>\n",
+ " <td id=\"T_637af_row4_col9\" class=\"data row4 col9\" >0.408893</td>\n",
+ " <td id=\"T_637af_row4_col10\" class=\"data row4 col10\" >0.091391</td>\n",
+ " <td id=\"T_637af_row4_col11\" class=\"data row4 col11\" >-2.294389</td>\n",
+ " <td id=\"T_637af_row4_col12\" class=\"data row4 col12\" >0.485506</td>\n",
+ " <td id=\"T_637af_row4_col13\" class=\"data row4 col13\" >-0.081304</td>\n",
+ " <td id=\"T_637af_row4_col14\" class=\"data row4 col14\" >-0.716272</td>\n",
+ " <td id=\"T_637af_row4_col15\" class=\"data row4 col15\" >-1.648010</td>\n",
+ " <td id=\"T_637af_row4_col16\" class=\"data row4 col16\" >1.005361</td>\n",
+ " <td id=\"T_637af_row4_col17\" class=\"data row4 col17\" >-1.489603</td>\n",
+ " <td id=\"T_637af_row4_col18\" class=\"data row4 col18\" >0.363098</td>\n",
+ " <td id=\"T_637af_row4_col19\" class=\"data row4 col19\" >0.758602</td>\n",
+ " <td id=\"T_637af_row4_col20\" class=\"data row4 col20\" >-1.373847</td>\n",
+ " <td id=\"T_637af_row4_col21\" class=\"data row4 col21\" >-0.972057</td>\n",
+ " <td id=\"T_637af_row4_col22\" class=\"data row4 col22\" >1.988537</td>\n",
+ " <td id=\"T_637af_row4_col23\" class=\"data row4 col23\" >0.319829</td>\n",
+ " <td id=\"T_637af_row4_col24\" class=\"data row4 col24\" >1.169060</td>\n",
+ " <td id=\"T_637af_row4_col25\" class=\"data row4 col25\" >0.146585</td>\n",
+ " <td id=\"T_637af_row4_col26\" class=\"data row4 col26\" >1.030388</td>\n",
+ " <td id=\"T_637af_row4_col27\" class=\"data row4 col27\" >1.165984</td>\n",
+ " <td id=\"T_637af_row4_col28\" class=\"data row4 col28\" >1.369563</td>\n",
+ " <td id=\"T_637af_row4_col29\" class=\"data row4 col29\" >0.730984</td>\n",
+ " <td id=\"T_637af_row4_col30\" class=\"data row4 col30\" >-1.383696</td>\n",
+ " <td id=\"T_637af_row4_col31\" class=\"data row4 col31\" >-0.515189</td>\n",
+ " <td id=\"T_637af_row4_col32\" class=\"data row4 col32\" >-0.808927</td>\n",
+ " <td id=\"T_637af_row4_col33\" class=\"data row4 col33\" >-1.174651</td>\n",
+ " <td id=\"T_637af_row4_col34\" class=\"data row4 col34\" >-1.631502</td>\n",
+ " <td id=\"T_637af_row4_col35\" class=\"data row4 col35\" >-1.123414</td>\n",
+ " <td id=\"T_637af_row4_col36\" class=\"data row4 col36\" >-0.478155</td>\n",
+ " <td id=\"T_637af_row4_col37\" class=\"data row4 col37\" >-1.583067</td>\n",
+ " <td id=\"T_637af_row4_col38\" class=\"data row4 col38\" >1.419074</td>\n",
+ " <td id=\"T_637af_row4_col39\" class=\"data row4 col39\" >1.668777</td>\n",
+ " <td id=\"T_637af_row4_col40\" class=\"data row4 col40\" >1.567517</td>\n",
+ " <td id=\"T_637af_row4_col41\" class=\"data row4 col41\" >0.222103</td>\n",
+ " <td id=\"T_637af_row4_col42\" class=\"data row4 col42\" >-0.336040</td>\n",
+ " <td id=\"T_637af_row4_col43\" class=\"data row4 col43\" >-1.352064</td>\n",
+ " <td id=\"T_637af_row4_col44\" class=\"data row4 col44\" >0.251032</td>\n",
+ " <td id=\"T_637af_row4_col45\" class=\"data row4 col45\" >-0.401695</td>\n",
+ " <td id=\"T_637af_row4_col46\" class=\"data row4 col46\" >0.268413</td>\n",
+ " <td id=\"T_637af_row4_col47\" class=\"data row4 col47\" >-0.012299</td>\n",
+ " <td id=\"T_637af_row4_col48\" class=\"data row4 col48\" >-0.918953</td>\n",
+ " <td id=\"T_637af_row4_col49\" class=\"data row4 col49\" >2.921208</td>\n",
+ " <td id=\"T_637af_row4_col50\" class=\"data row4 col50\" >-0.581588</td>\n",
+ " <td id=\"T_637af_row4_col51\" class=\"data row4 col51\" >0.672848</td>\n",
+ " <td id=\"T_637af_row4_col52\" class=\"data row4 col52\" >1.251136</td>\n",
+ " <td id=\"T_637af_row4_col53\" class=\"data row4 col53\" >1.382263</td>\n",
+ " <td id=\"T_637af_row4_col54\" class=\"data row4 col54\" >1.429897</td>\n",
+ " <td id=\"T_637af_row4_col55\" class=\"data row4 col55\" >1.290990</td>\n",
+ " <td id=\"T_637af_row4_col56\" class=\"data row4 col56\" >-1.272673</td>\n",
+ " <td id=\"T_637af_row4_col57\" class=\"data row4 col57\" >-0.308611</td>\n",
+ " <td id=\"T_637af_row4_col58\" class=\"data row4 col58\" >-0.422988</td>\n",
+ " <td id=\"T_637af_row4_col59\" class=\"data row4 col59\" >-0.675642</td>\n",
+ " <td id=\"T_637af_row4_col60\" class=\"data row4 col60\" >0.874441</td>\n",
+ " <td id=\"T_637af_row4_col61\" class=\"data row4 col61\" >1.305736</td>\n",
+ " <td id=\"T_637af_row4_col62\" class=\"data row4 col62\" >-0.262585</td>\n",
+ " <td id=\"T_637af_row4_col63\" class=\"data row4 col63\" >-1.099395</td>\n",
+ " <td id=\"T_637af_row4_col64\" class=\"data row4 col64\" >-0.667101</td>\n",
+ " <td id=\"T_637af_row4_col65\" class=\"data row4 col65\" >-0.646737</td>\n",
+ " <td id=\"T_637af_row4_col66\" class=\"data row4 col66\" >-0.556338</td>\n",
+ " <td id=\"T_637af_row4_col67\" class=\"data row4 col67\" >-0.196591</td>\n",
+ " <td id=\"T_637af_row4_col68\" class=\"data row4 col68\" >0.119306</td>\n",
+ " <td id=\"T_637af_row4_col69\" class=\"data row4 col69\" >-0.266455</td>\n",
+ " <td id=\"T_637af_row4_col70\" class=\"data row4 col70\" >-0.524267</td>\n",
+ " <td id=\"T_637af_row4_col71\" class=\"data row4 col71\" >2.650951</td>\n",
+ " <td id=\"T_637af_row4_col72\" class=\"data row4 col72\" >0.097318</td>\n",
+ " <td id=\"T_637af_row4_col73\" class=\"data row4 col73\" >-0.974697</td>\n",
+ " <td id=\"T_637af_row4_col74\" class=\"data row4 col74\" >0.189964</td>\n",
+ " <td id=\"T_637af_row4_col75\" class=\"data row4 col75\" >1.141155</td>\n",
+ " <td id=\"T_637af_row4_col76\" class=\"data row4 col76\" >-0.064434</td>\n",
+ " <td id=\"T_637af_row4_col77\" class=\"data row4 col77\" >1.104971</td>\n",
+ " <td id=\"T_637af_row4_col78\" class=\"data row4 col78\" >-1.508908</td>\n",
+ " <td id=\"T_637af_row4_col79\" class=\"data row4 col79\" >-0.031833</td>\n",
+ " <td id=\"T_637af_row4_col80\" class=\"data row4 col80\" >0.803919</td>\n",
+ " <td id=\"T_637af_row4_col81\" class=\"data row4 col81\" >-0.659221</td>\n",
+ " <td id=\"T_637af_row4_col82\" class=\"data row4 col82\" >0.939145</td>\n",
+ " <td id=\"T_637af_row4_col83\" class=\"data row4 col83\" >0.214041</td>\n",
+ " <td id=\"T_637af_row4_col84\" class=\"data row4 col84\" >-0.531805</td>\n",
+ " <td id=\"T_637af_row4_col85\" class=\"data row4 col85\" >0.956060</td>\n",
+ " <td id=\"T_637af_row4_col86\" class=\"data row4 col86\" >0.249328</td>\n",
+ " <td id=\"T_637af_row4_col87\" class=\"data row4 col87\" >0.637903</td>\n",
+ " <td id=\"T_637af_row4_col88\" class=\"data row4 col88\" >-0.510158</td>\n",
+ " <td id=\"T_637af_row4_col89\" class=\"data row4 col89\" >1.850287</td>\n",
+ " <td id=\"T_637af_row4_col90\" class=\"data row4 col90\" >-0.348407</td>\n",
+ " <td id=\"T_637af_row4_col91\" class=\"data row4 col91\" >2.001376</td>\n",
+ " <td id=\"T_637af_row4_col92\" class=\"data row4 col92\" >-0.389643</td>\n",
+ " <td id=\"T_637af_row4_col93\" class=\"data row4 col93\" >-0.024786</td>\n",
+ " <td id=\"T_637af_row4_col94\" class=\"data row4 col94\" >-0.470973</td>\n",
+ " <td id=\"T_637af_row4_col95\" class=\"data row4 col95\" >0.869339</td>\n",
+ " <td id=\"T_637af_row4_col96\" class=\"data row4 col96\" >0.170667</td>\n",
+ " <td id=\"T_637af_row4_col97\" class=\"data row4 col97\" >0.598062</td>\n",
+ " <td id=\"T_637af_row4_col98\" class=\"data row4 col98\" >1.217262</td>\n",
+ " <td id=\"T_637af_row4_col99\" class=\"data row4 col99\" >1.274013</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n",
+ " <td id=\"T_637af_row5_col0\" class=\"data row5 col0\" >-0.389981</td>\n",
+ " <td id=\"T_637af_row5_col1\" class=\"data row5 col1\" >-0.752441</td>\n",
+ " <td id=\"T_637af_row5_col2\" class=\"data row5 col2\" >-0.734871</td>\n",
+ " <td id=\"T_637af_row5_col3\" class=\"data row5 col3\" >3.517318</td>\n",
+ " <td id=\"T_637af_row5_col4\" class=\"data row5 col4\" >-1.173559</td>\n",
+ " <td id=\"T_637af_row5_col5\" class=\"data row5 col5\" >-0.004956</td>\n",
+ " <td id=\"T_637af_row5_col6\" class=\"data row5 col6\" >0.145419</td>\n",
+ " <td id=\"T_637af_row5_col7\" class=\"data row5 col7\" >2.151368</td>\n",
+ " <td id=\"T_637af_row5_col8\" class=\"data row5 col8\" >-3.086037</td>\n",
+ " <td id=\"T_637af_row5_col9\" class=\"data row5 col9\" >-1.569139</td>\n",
+ " <td id=\"T_637af_row5_col10\" class=\"data row5 col10\" >1.449784</td>\n",
+ " <td id=\"T_637af_row5_col11\" class=\"data row5 col11\" >-0.868951</td>\n",
+ " <td id=\"T_637af_row5_col12\" class=\"data row5 col12\" >-1.687716</td>\n",
+ " <td id=\"T_637af_row5_col13\" class=\"data row5 col13\" >-0.994401</td>\n",
+ " <td id=\"T_637af_row5_col14\" class=\"data row5 col14\" >1.153266</td>\n",
+ " <td id=\"T_637af_row5_col15\" class=\"data row5 col15\" >1.803045</td>\n",
+ " <td id=\"T_637af_row5_col16\" class=\"data row5 col16\" >-0.819059</td>\n",
+ " <td id=\"T_637af_row5_col17\" class=\"data row5 col17\" >0.847970</td>\n",
+ " <td id=\"T_637af_row5_col18\" class=\"data row5 col18\" >0.227102</td>\n",
+ " <td id=\"T_637af_row5_col19\" class=\"data row5 col19\" >-0.500762</td>\n",
+ " <td id=\"T_637af_row5_col20\" class=\"data row5 col20\" >0.868210</td>\n",
+ " <td id=\"T_637af_row5_col21\" class=\"data row5 col21\" >1.823540</td>\n",
+ " <td id=\"T_637af_row5_col22\" class=\"data row5 col22\" >1.161007</td>\n",
+ " <td id=\"T_637af_row5_col23\" class=\"data row5 col23\" >-0.307606</td>\n",
+ " <td id=\"T_637af_row5_col24\" class=\"data row5 col24\" >-0.713416</td>\n",
+ " <td id=\"T_637af_row5_col25\" class=\"data row5 col25\" >0.363560</td>\n",
+ " <td id=\"T_637af_row5_col26\" class=\"data row5 col26\" >-0.822162</td>\n",
+ " <td id=\"T_637af_row5_col27\" class=\"data row5 col27\" >2.427681</td>\n",
+ " <td id=\"T_637af_row5_col28\" class=\"data row5 col28\" >-0.129537</td>\n",
+ " <td id=\"T_637af_row5_col29\" class=\"data row5 col29\" >-0.078716</td>\n",
+ " <td id=\"T_637af_row5_col30\" class=\"data row5 col30\" >1.345644</td>\n",
+ " <td id=\"T_637af_row5_col31\" class=\"data row5 col31\" >-1.286094</td>\n",
+ " <td id=\"T_637af_row5_col32\" class=\"data row5 col32\" >0.237242</td>\n",
+ " <td id=\"T_637af_row5_col33\" class=\"data row5 col33\" >-0.136056</td>\n",
+ " <td id=\"T_637af_row5_col34\" class=\"data row5 col34\" >0.596664</td>\n",
+ " <td id=\"T_637af_row5_col35\" class=\"data row5 col35\" >-1.412381</td>\n",
+ " <td id=\"T_637af_row5_col36\" class=\"data row5 col36\" >1.206341</td>\n",
+ " <td id=\"T_637af_row5_col37\" class=\"data row5 col37\" >0.299860</td>\n",
+ " <td id=\"T_637af_row5_col38\" class=\"data row5 col38\" >0.705238</td>\n",
+ " <td id=\"T_637af_row5_col39\" class=\"data row5 col39\" >0.142412</td>\n",
+ " <td id=\"T_637af_row5_col40\" class=\"data row5 col40\" >-1.059382</td>\n",
+ " <td id=\"T_637af_row5_col41\" class=\"data row5 col41\" >0.833468</td>\n",
+ " <td id=\"T_637af_row5_col42\" class=\"data row5 col42\" >1.060015</td>\n",
+ " <td id=\"T_637af_row5_col43\" class=\"data row5 col43\" >-0.527045</td>\n",
+ " <td id=\"T_637af_row5_col44\" class=\"data row5 col44\" >-1.135732</td>\n",
+ " <td id=\"T_637af_row5_col45\" class=\"data row5 col45\" >-1.140983</td>\n",
+ " <td id=\"T_637af_row5_col46\" class=\"data row5 col46\" >-0.779540</td>\n",
+ " <td id=\"T_637af_row5_col47\" class=\"data row5 col47\" >-0.640875</td>\n",
+ " <td id=\"T_637af_row5_col48\" class=\"data row5 col48\" >-1.217196</td>\n",
+ " <td id=\"T_637af_row5_col49\" class=\"data row5 col49\" >-1.675663</td>\n",
+ " <td id=\"T_637af_row5_col50\" class=\"data row5 col50\" >0.241263</td>\n",
+ " <td id=\"T_637af_row5_col51\" class=\"data row5 col51\" >-0.273322</td>\n",
+ " <td id=\"T_637af_row5_col52\" class=\"data row5 col52\" >-1.697936</td>\n",
+ " <td id=\"T_637af_row5_col53\" class=\"data row5 col53\" >-0.594943</td>\n",
+ " <td id=\"T_637af_row5_col54\" class=\"data row5 col54\" >0.101154</td>\n",
+ " <td id=\"T_637af_row5_col55\" class=\"data row5 col55\" >1.391735</td>\n",
+ " <td id=\"T_637af_row5_col56\" class=\"data row5 col56\" >-0.426953</td>\n",
+ " <td id=\"T_637af_row5_col57\" class=\"data row5 col57\" >1.008344</td>\n",
+ " <td id=\"T_637af_row5_col58\" class=\"data row5 col58\" >-0.818577</td>\n",
+ " <td id=\"T_637af_row5_col59\" class=\"data row5 col59\" >1.924570</td>\n",
+ " <td id=\"T_637af_row5_col60\" class=\"data row5 col60\" >-0.578900</td>\n",
+ " <td id=\"T_637af_row5_col61\" class=\"data row5 col61\" >-0.457395</td>\n",
+ " <td id=\"T_637af_row5_col62\" class=\"data row5 col62\" >-1.096705</td>\n",
+ " <td id=\"T_637af_row5_col63\" class=\"data row5 col63\" >0.418522</td>\n",
+ " <td id=\"T_637af_row5_col64\" class=\"data row5 col64\" >-0.155623</td>\n",
+ " <td id=\"T_637af_row5_col65\" class=\"data row5 col65\" >0.169706</td>\n",
+ " <td id=\"T_637af_row5_col66\" class=\"data row5 col66\" >-2.533706</td>\n",
+ " <td id=\"T_637af_row5_col67\" class=\"data row5 col67\" >0.018904</td>\n",
+ " <td id=\"T_637af_row5_col68\" class=\"data row5 col68\" >1.434160</td>\n",
+ " <td id=\"T_637af_row5_col69\" class=\"data row5 col69\" >0.744095</td>\n",
+ " <td id=\"T_637af_row5_col70\" class=\"data row5 col70\" >0.647626</td>\n",
+ " <td id=\"T_637af_row5_col71\" class=\"data row5 col71\" >-0.770309</td>\n",
+ " <td id=\"T_637af_row5_col72\" class=\"data row5 col72\" >2.329141</td>\n",
+ " <td id=\"T_637af_row5_col73\" class=\"data row5 col73\" >-0.141547</td>\n",
+ " <td id=\"T_637af_row5_col74\" class=\"data row5 col74\" >-1.761594</td>\n",
+ " <td id=\"T_637af_row5_col75\" class=\"data row5 col75\" >0.702091</td>\n",
+ " <td id=\"T_637af_row5_col76\" class=\"data row5 col76\" >-1.531450</td>\n",
+ " <td id=\"T_637af_row5_col77\" class=\"data row5 col77\" >-0.788427</td>\n",
+ " <td id=\"T_637af_row5_col78\" class=\"data row5 col78\" >-0.184622</td>\n",
+ " <td id=\"T_637af_row5_col79\" class=\"data row5 col79\" >-1.942321</td>\n",
+ " <td id=\"T_637af_row5_col80\" class=\"data row5 col80\" >1.530113</td>\n",
+ " <td id=\"T_637af_row5_col81\" class=\"data row5 col81\" >0.503406</td>\n",
+ " <td id=\"T_637af_row5_col82\" class=\"data row5 col82\" >1.105845</td>\n",
+ " <td id=\"T_637af_row5_col83\" class=\"data row5 col83\" >-0.935120</td>\n",
+ " <td id=\"T_637af_row5_col84\" class=\"data row5 col84\" >-1.115483</td>\n",
+ " <td id=\"T_637af_row5_col85\" class=\"data row5 col85\" >-2.249762</td>\n",
+ " <td id=\"T_637af_row5_col86\" class=\"data row5 col86\" >1.307135</td>\n",
+ " <td id=\"T_637af_row5_col87\" class=\"data row5 col87\" >0.788412</td>\n",
+ " <td id=\"T_637af_row5_col88\" class=\"data row5 col88\" >-0.441091</td>\n",
+ " <td id=\"T_637af_row5_col89\" class=\"data row5 col89\" >0.073561</td>\n",
+ " <td id=\"T_637af_row5_col90\" class=\"data row5 col90\" >0.812101</td>\n",
+ " <td id=\"T_637af_row5_col91\" class=\"data row5 col91\" >-0.916146</td>\n",
+ " <td id=\"T_637af_row5_col92\" class=\"data row5 col92\" >1.573714</td>\n",
+ " <td id=\"T_637af_row5_col93\" class=\"data row5 col93\" >-0.309508</td>\n",
+ " <td id=\"T_637af_row5_col94\" class=\"data row5 col94\" >0.499987</td>\n",
+ " <td id=\"T_637af_row5_col95\" class=\"data row5 col95\" >0.187594</td>\n",
+ " <td id=\"T_637af_row5_col96\" class=\"data row5 col96\" >0.558913</td>\n",
+ " <td id=\"T_637af_row5_col97\" class=\"data row5 col97\" >0.903246</td>\n",
+ " <td id=\"T_637af_row5_col98\" class=\"data row5 col98\" >0.317901</td>\n",
+ " <td id=\"T_637af_row5_col99\" class=\"data row5 col99\" >-0.809797</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n",
+ " <td id=\"T_637af_row6_col0\" class=\"data row6 col0\" >1.128248</td>\n",
+ " <td id=\"T_637af_row6_col1\" class=\"data row6 col1\" >1.516826</td>\n",
+ " <td id=\"T_637af_row6_col2\" class=\"data row6 col2\" >-0.186735</td>\n",
+ " <td id=\"T_637af_row6_col3\" class=\"data row6 col3\" >-0.668157</td>\n",
+ " <td id=\"T_637af_row6_col4\" class=\"data row6 col4\" >1.132259</td>\n",
+ " <td id=\"T_637af_row6_col5\" class=\"data row6 col5\" >-0.246648</td>\n",
+ " <td id=\"T_637af_row6_col6\" class=\"data row6 col6\" >-0.855167</td>\n",
+ " <td id=\"T_637af_row6_col7\" class=\"data row6 col7\" >0.732283</td>\n",
+ " <td id=\"T_637af_row6_col8\" class=\"data row6 col8\" >0.931802</td>\n",
+ " <td id=\"T_637af_row6_col9\" class=\"data row6 col9\" >1.318684</td>\n",
+ " <td id=\"T_637af_row6_col10\" class=\"data row6 col10\" >-1.198418</td>\n",
+ " <td id=\"T_637af_row6_col11\" class=\"data row6 col11\" >-1.149318</td>\n",
+ " <td id=\"T_637af_row6_col12\" class=\"data row6 col12\" >0.586321</td>\n",
+ " <td id=\"T_637af_row6_col13\" class=\"data row6 col13\" >-1.171937</td>\n",
+ " <td id=\"T_637af_row6_col14\" class=\"data row6 col14\" >-0.607731</td>\n",
+ " <td id=\"T_637af_row6_col15\" class=\"data row6 col15\" >2.753747</td>\n",
+ " <td id=\"T_637af_row6_col16\" class=\"data row6 col16\" >1.479287</td>\n",
+ " <td id=\"T_637af_row6_col17\" class=\"data row6 col17\" >-1.136365</td>\n",
+ " <td id=\"T_637af_row6_col18\" class=\"data row6 col18\" >-0.020485</td>\n",
+ " <td id=\"T_637af_row6_col19\" class=\"data row6 col19\" >0.320444</td>\n",
+ " <td id=\"T_637af_row6_col20\" class=\"data row6 col20\" >-1.955755</td>\n",
+ " <td id=\"T_637af_row6_col21\" class=\"data row6 col21\" >0.660402</td>\n",
+ " <td id=\"T_637af_row6_col22\" class=\"data row6 col22\" >-1.545371</td>\n",
+ " <td id=\"T_637af_row6_col23\" class=\"data row6 col23\" >0.200519</td>\n",
+ " <td id=\"T_637af_row6_col24\" class=\"data row6 col24\" >-0.017263</td>\n",
+ " <td id=\"T_637af_row6_col25\" class=\"data row6 col25\" >1.634686</td>\n",
+ " <td id=\"T_637af_row6_col26\" class=\"data row6 col26\" >0.599246</td>\n",
+ " <td id=\"T_637af_row6_col27\" class=\"data row6 col27\" >0.462989</td>\n",
+ " <td id=\"T_637af_row6_col28\" class=\"data row6 col28\" >0.023721</td>\n",
+ " <td id=\"T_637af_row6_col29\" class=\"data row6 col29\" >0.225546</td>\n",
+ " <td id=\"T_637af_row6_col30\" class=\"data row6 col30\" >0.170972</td>\n",
+ " <td id=\"T_637af_row6_col31\" class=\"data row6 col31\" >-0.027496</td>\n",
+ " <td id=\"T_637af_row6_col32\" class=\"data row6 col32\" >-0.061233</td>\n",
+ " <td id=\"T_637af_row6_col33\" class=\"data row6 col33\" >-0.566411</td>\n",
+ " <td id=\"T_637af_row6_col34\" class=\"data row6 col34\" >-0.669567</td>\n",
+ " <td id=\"T_637af_row6_col35\" class=\"data row6 col35\" >0.601618</td>\n",
+ " <td id=\"T_637af_row6_col36\" class=\"data row6 col36\" >0.503656</td>\n",
+ " <td id=\"T_637af_row6_col37\" class=\"data row6 col37\" >-0.678253</td>\n",
+ " <td id=\"T_637af_row6_col38\" class=\"data row6 col38\" >-2.907108</td>\n",
+ " <td id=\"T_637af_row6_col39\" class=\"data row6 col39\" >-1.717123</td>\n",
+ " <td id=\"T_637af_row6_col40\" class=\"data row6 col40\" >0.397631</td>\n",
+ " <td id=\"T_637af_row6_col41\" class=\"data row6 col41\" >1.300108</td>\n",
+ " <td id=\"T_637af_row6_col42\" class=\"data row6 col42\" >0.215821</td>\n",
+ " <td id=\"T_637af_row6_col43\" class=\"data row6 col43\" >-0.593075</td>\n",
+ " <td id=\"T_637af_row6_col44\" class=\"data row6 col44\" >-0.225944</td>\n",
+ " <td id=\"T_637af_row6_col45\" class=\"data row6 col45\" >-0.946057</td>\n",
+ " <td id=\"T_637af_row6_col46\" class=\"data row6 col46\" >1.000308</td>\n",
+ " <td id=\"T_637af_row6_col47\" class=\"data row6 col47\" >0.393160</td>\n",
+ " <td id=\"T_637af_row6_col48\" class=\"data row6 col48\" >1.342074</td>\n",
+ " <td id=\"T_637af_row6_col49\" class=\"data row6 col49\" >-0.370687</td>\n",
+ " <td id=\"T_637af_row6_col50\" class=\"data row6 col50\" >-0.166413</td>\n",
+ " <td id=\"T_637af_row6_col51\" class=\"data row6 col51\" >-0.419814</td>\n",
+ " <td id=\"T_637af_row6_col52\" class=\"data row6 col52\" >-0.255931</td>\n",
+ " <td id=\"T_637af_row6_col53\" class=\"data row6 col53\" >1.789478</td>\n",
+ " <td id=\"T_637af_row6_col54\" class=\"data row6 col54\" >0.282378</td>\n",
+ " <td id=\"T_637af_row6_col55\" class=\"data row6 col55\" >0.742260</td>\n",
+ " <td id=\"T_637af_row6_col56\" class=\"data row6 col56\" >-0.050498</td>\n",
+ " <td id=\"T_637af_row6_col57\" class=\"data row6 col57\" >1.415309</td>\n",
+ " <td id=\"T_637af_row6_col58\" class=\"data row6 col58\" >0.838166</td>\n",
+ " <td id=\"T_637af_row6_col59\" class=\"data row6 col59\" >-1.400292</td>\n",
+ " <td id=\"T_637af_row6_col60\" class=\"data row6 col60\" >-0.937976</td>\n",
+ " <td id=\"T_637af_row6_col61\" class=\"data row6 col61\" >-1.499148</td>\n",
+ " <td id=\"T_637af_row6_col62\" class=\"data row6 col62\" >0.801859</td>\n",
+ " <td id=\"T_637af_row6_col63\" class=\"data row6 col63\" >0.224824</td>\n",
+ " <td id=\"T_637af_row6_col64\" class=\"data row6 col64\" >0.283572</td>\n",
+ " <td id=\"T_637af_row6_col65\" class=\"data row6 col65\" >0.643703</td>\n",
+ " <td id=\"T_637af_row6_col66\" class=\"data row6 col66\" >-1.198465</td>\n",
+ " <td id=\"T_637af_row6_col67\" class=\"data row6 col67\" >0.527206</td>\n",
+ " <td id=\"T_637af_row6_col68\" class=\"data row6 col68\" >0.215202</td>\n",
+ " <td id=\"T_637af_row6_col69\" class=\"data row6 col69\" >0.437048</td>\n",
+ " <td id=\"T_637af_row6_col70\" class=\"data row6 col70\" >1.312868</td>\n",
+ " <td id=\"T_637af_row6_col71\" class=\"data row6 col71\" >0.741243</td>\n",
+ " <td id=\"T_637af_row6_col72\" class=\"data row6 col72\" >0.077988</td>\n",
+ " <td id=\"T_637af_row6_col73\" class=\"data row6 col73\" >0.006123</td>\n",
+ " <td id=\"T_637af_row6_col74\" class=\"data row6 col74\" >0.190370</td>\n",
+ " <td id=\"T_637af_row6_col75\" class=\"data row6 col75\" >0.018007</td>\n",
+ " <td id=\"T_637af_row6_col76\" class=\"data row6 col76\" >-1.026036</td>\n",
+ " <td id=\"T_637af_row6_col77\" class=\"data row6 col77\" >-2.378430</td>\n",
+ " <td id=\"T_637af_row6_col78\" class=\"data row6 col78\" >-1.069949</td>\n",
+ " <td id=\"T_637af_row6_col79\" class=\"data row6 col79\" >0.843822</td>\n",
+ " <td id=\"T_637af_row6_col80\" class=\"data row6 col80\" >1.289216</td>\n",
+ " <td id=\"T_637af_row6_col81\" class=\"data row6 col81\" >-1.423369</td>\n",
+ " <td id=\"T_637af_row6_col82\" class=\"data row6 col82\" >-0.462887</td>\n",
+ " <td id=\"T_637af_row6_col83\" class=\"data row6 col83\" >0.197330</td>\n",
+ " <td id=\"T_637af_row6_col84\" class=\"data row6 col84\" >-0.935076</td>\n",
+ " <td id=\"T_637af_row6_col85\" class=\"data row6 col85\" >0.441271</td>\n",
+ " <td id=\"T_637af_row6_col86\" class=\"data row6 col86\" >0.414643</td>\n",
+ " <td id=\"T_637af_row6_col87\" class=\"data row6 col87\" >-0.377887</td>\n",
+ " <td id=\"T_637af_row6_col88\" class=\"data row6 col88\" >-0.530515</td>\n",
+ " <td id=\"T_637af_row6_col89\" class=\"data row6 col89\" >0.621592</td>\n",
+ " <td id=\"T_637af_row6_col90\" class=\"data row6 col90\" >1.009572</td>\n",
+ " <td id=\"T_637af_row6_col91\" class=\"data row6 col91\" >0.569718</td>\n",
+ " <td id=\"T_637af_row6_col92\" class=\"data row6 col92\" >0.175291</td>\n",
+ " <td id=\"T_637af_row6_col93\" class=\"data row6 col93\" >-0.656279</td>\n",
+ " <td id=\"T_637af_row6_col94\" class=\"data row6 col94\" >-0.112273</td>\n",
+ " <td id=\"T_637af_row6_col95\" class=\"data row6 col95\" >-0.392137</td>\n",
+ " <td id=\"T_637af_row6_col96\" class=\"data row6 col96\" >-1.043558</td>\n",
+ " <td id=\"T_637af_row6_col97\" class=\"data row6 col97\" >-0.467318</td>\n",
+ " <td id=\"T_637af_row6_col98\" class=\"data row6 col98\" >-0.384329</td>\n",
+ " <td id=\"T_637af_row6_col99\" class=\"data row6 col99\" >-2.009207</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n",
+ " <td id=\"T_637af_row7_col0\" class=\"data row7 col0\" >0.658598</td>\n",
+ " <td id=\"T_637af_row7_col1\" class=\"data row7 col1\" >0.101830</td>\n",
+ " <td id=\"T_637af_row7_col2\" class=\"data row7 col2\" >-0.682781</td>\n",
+ " <td id=\"T_637af_row7_col3\" class=\"data row7 col3\" >0.229349</td>\n",
+ " <td id=\"T_637af_row7_col4\" class=\"data row7 col4\" >-0.305657</td>\n",
+ " <td id=\"T_637af_row7_col5\" class=\"data row7 col5\" >0.404877</td>\n",
+ " <td id=\"T_637af_row7_col6\" class=\"data row7 col6\" >0.252244</td>\n",
+ " <td id=\"T_637af_row7_col7\" class=\"data row7 col7\" >-0.837784</td>\n",
+ " <td id=\"T_637af_row7_col8\" class=\"data row7 col8\" >-0.039624</td>\n",
+ " <td id=\"T_637af_row7_col9\" class=\"data row7 col9\" >0.329457</td>\n",
+ " <td id=\"T_637af_row7_col10\" class=\"data row7 col10\" >0.751694</td>\n",
+ " <td id=\"T_637af_row7_col11\" class=\"data row7 col11\" >1.469070</td>\n",
+ " <td id=\"T_637af_row7_col12\" class=\"data row7 col12\" >-0.157199</td>\n",
+ " <td id=\"T_637af_row7_col13\" class=\"data row7 col13\" >1.032628</td>\n",
+ " <td id=\"T_637af_row7_col14\" class=\"data row7 col14\" >-0.584639</td>\n",
+ " <td id=\"T_637af_row7_col15\" class=\"data row7 col15\" >-0.925544</td>\n",
+ " <td id=\"T_637af_row7_col16\" class=\"data row7 col16\" >0.342474</td>\n",
+ " <td id=\"T_637af_row7_col17\" class=\"data row7 col17\" >-0.969363</td>\n",
+ " <td id=\"T_637af_row7_col18\" class=\"data row7 col18\" >0.133480</td>\n",
+ " <td id=\"T_637af_row7_col19\" class=\"data row7 col19\" >-0.385974</td>\n",
+ " <td id=\"T_637af_row7_col20\" class=\"data row7 col20\" >-0.600278</td>\n",
+ " <td id=\"T_637af_row7_col21\" class=\"data row7 col21\" >0.281939</td>\n",
+ " <td id=\"T_637af_row7_col22\" class=\"data row7 col22\" >0.868579</td>\n",
+ " <td id=\"T_637af_row7_col23\" class=\"data row7 col23\" >1.129803</td>\n",
+ " <td id=\"T_637af_row7_col24\" class=\"data row7 col24\" >-0.041898</td>\n",
+ " <td id=\"T_637af_row7_col25\" class=\"data row7 col25\" >0.961193</td>\n",
+ " <td id=\"T_637af_row7_col26\" class=\"data row7 col26\" >0.131521</td>\n",
+ " <td id=\"T_637af_row7_col27\" class=\"data row7 col27\" >-0.792889</td>\n",
+ " <td id=\"T_637af_row7_col28\" class=\"data row7 col28\" >-1.285737</td>\n",
+ " <td id=\"T_637af_row7_col29\" class=\"data row7 col29\" >0.073934</td>\n",
+ " <td id=\"T_637af_row7_col30\" class=\"data row7 col30\" >-1.333315</td>\n",
+ " <td id=\"T_637af_row7_col31\" class=\"data row7 col31\" >-1.044125</td>\n",
+ " <td id=\"T_637af_row7_col32\" class=\"data row7 col32\" >1.277338</td>\n",
+ " <td id=\"T_637af_row7_col33\" class=\"data row7 col33\" >1.492257</td>\n",
+ " <td id=\"T_637af_row7_col34\" class=\"data row7 col34\" >0.411379</td>\n",
+ " <td id=\"T_637af_row7_col35\" class=\"data row7 col35\" >1.771805</td>\n",
+ " <td id=\"T_637af_row7_col36\" class=\"data row7 col36\" >-1.111128</td>\n",
+ " <td id=\"T_637af_row7_col37\" class=\"data row7 col37\" >1.123233</td>\n",
+ " <td id=\"T_637af_row7_col38\" class=\"data row7 col38\" >-1.019449</td>\n",
+ " <td id=\"T_637af_row7_col39\" class=\"data row7 col39\" >1.738357</td>\n",
+ " <td id=\"T_637af_row7_col40\" class=\"data row7 col40\" >-0.690764</td>\n",
+ " <td id=\"T_637af_row7_col41\" class=\"data row7 col41\" >-0.120710</td>\n",
+ " <td id=\"T_637af_row7_col42\" class=\"data row7 col42\" >-0.421359</td>\n",
+ " <td id=\"T_637af_row7_col43\" class=\"data row7 col43\" >-0.727294</td>\n",
+ " <td id=\"T_637af_row7_col44\" class=\"data row7 col44\" >-0.857759</td>\n",
+ " <td id=\"T_637af_row7_col45\" class=\"data row7 col45\" >-0.069436</td>\n",
+ " <td id=\"T_637af_row7_col46\" class=\"data row7 col46\" >-0.328334</td>\n",
+ " <td id=\"T_637af_row7_col47\" class=\"data row7 col47\" >-0.558180</td>\n",
+ " <td id=\"T_637af_row7_col48\" class=\"data row7 col48\" >1.063474</td>\n",
+ " <td id=\"T_637af_row7_col49\" class=\"data row7 col49\" >-0.519133</td>\n",
+ " <td id=\"T_637af_row7_col50\" class=\"data row7 col50\" >-0.496902</td>\n",
+ " <td id=\"T_637af_row7_col51\" class=\"data row7 col51\" >1.089589</td>\n",
+ " <td id=\"T_637af_row7_col52\" class=\"data row7 col52\" >-1.615801</td>\n",
+ " <td id=\"T_637af_row7_col53\" class=\"data row7 col53\" >0.080174</td>\n",
+ " <td id=\"T_637af_row7_col54\" class=\"data row7 col54\" >-0.229938</td>\n",
+ " <td id=\"T_637af_row7_col55\" class=\"data row7 col55\" >-0.498420</td>\n",
+ " <td id=\"T_637af_row7_col56\" class=\"data row7 col56\" >-0.624615</td>\n",
+ " <td id=\"T_637af_row7_col57\" class=\"data row7 col57\" >0.059481</td>\n",
+ " <td id=\"T_637af_row7_col58\" class=\"data row7 col58\" >-0.093158</td>\n",
+ " <td id=\"T_637af_row7_col59\" class=\"data row7 col59\" >-1.784549</td>\n",
+ " <td id=\"T_637af_row7_col60\" class=\"data row7 col60\" >-0.503789</td>\n",
+ " <td id=\"T_637af_row7_col61\" class=\"data row7 col61\" >-0.140528</td>\n",
+ " <td id=\"T_637af_row7_col62\" class=\"data row7 col62\" >0.002653</td>\n",
+ " <td id=\"T_637af_row7_col63\" class=\"data row7 col63\" >-0.484930</td>\n",
+ " <td id=\"T_637af_row7_col64\" class=\"data row7 col64\" >0.055914</td>\n",
+ " <td id=\"T_637af_row7_col65\" class=\"data row7 col65\" >-0.680948</td>\n",
+ " <td id=\"T_637af_row7_col66\" class=\"data row7 col66\" >-0.994271</td>\n",
+ " <td id=\"T_637af_row7_col67\" class=\"data row7 col67\" >1.277052</td>\n",
+ " <td id=\"T_637af_row7_col68\" class=\"data row7 col68\" >0.037651</td>\n",
+ " <td id=\"T_637af_row7_col69\" class=\"data row7 col69\" >2.155421</td>\n",
+ " <td id=\"T_637af_row7_col70\" class=\"data row7 col70\" >-0.437589</td>\n",
+ " <td id=\"T_637af_row7_col71\" class=\"data row7 col71\" >0.696404</td>\n",
+ " <td id=\"T_637af_row7_col72\" class=\"data row7 col72\" >0.417752</td>\n",
+ " <td id=\"T_637af_row7_col73\" class=\"data row7 col73\" >-0.544785</td>\n",
+ " <td id=\"T_637af_row7_col74\" class=\"data row7 col74\" >1.190690</td>\n",
+ " <td id=\"T_637af_row7_col75\" class=\"data row7 col75\" >0.978262</td>\n",
+ " <td id=\"T_637af_row7_col76\" class=\"data row7 col76\" >0.752102</td>\n",
+ " <td id=\"T_637af_row7_col77\" class=\"data row7 col77\" >0.504472</td>\n",
+ " <td id=\"T_637af_row7_col78\" class=\"data row7 col78\" >0.139853</td>\n",
+ " <td id=\"T_637af_row7_col79\" class=\"data row7 col79\" >-0.505089</td>\n",
+ " <td id=\"T_637af_row7_col80\" class=\"data row7 col80\" >-0.264975</td>\n",
+ " <td id=\"T_637af_row7_col81\" class=\"data row7 col81\" >-1.603194</td>\n",
+ " <td id=\"T_637af_row7_col82\" class=\"data row7 col82\" >0.731847</td>\n",
+ " <td id=\"T_637af_row7_col83\" class=\"data row7 col83\" >0.010903</td>\n",
+ " <td id=\"T_637af_row7_col84\" class=\"data row7 col84\" >-1.165346</td>\n",
+ " <td id=\"T_637af_row7_col85\" class=\"data row7 col85\" >-0.125195</td>\n",
+ " <td id=\"T_637af_row7_col86\" class=\"data row7 col86\" >-1.032685</td>\n",
+ " <td id=\"T_637af_row7_col87\" class=\"data row7 col87\" >-0.465520</td>\n",
+ " <td id=\"T_637af_row7_col88\" class=\"data row7 col88\" >1.514808</td>\n",
+ " <td id=\"T_637af_row7_col89\" class=\"data row7 col89\" >0.304762</td>\n",
+ " <td id=\"T_637af_row7_col90\" class=\"data row7 col90\" >0.793414</td>\n",
+ " <td id=\"T_637af_row7_col91\" class=\"data row7 col91\" >0.314635</td>\n",
+ " <td id=\"T_637af_row7_col92\" class=\"data row7 col92\" >-1.638279</td>\n",
+ " <td id=\"T_637af_row7_col93\" class=\"data row7 col93\" >0.111737</td>\n",
+ " <td id=\"T_637af_row7_col94\" class=\"data row7 col94\" >-0.777037</td>\n",
+ " <td id=\"T_637af_row7_col95\" class=\"data row7 col95\" >0.251783</td>\n",
+ " <td id=\"T_637af_row7_col96\" class=\"data row7 col96\" >1.126303</td>\n",
+ " <td id=\"T_637af_row7_col97\" class=\"data row7 col97\" >-0.808798</td>\n",
+ " <td id=\"T_637af_row7_col98\" class=\"data row7 col98\" >0.422064</td>\n",
+ " <td id=\"T_637af_row7_col99\" class=\"data row7 col99\" >-0.349264</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n",
+ " <td id=\"T_637af_row8_col0\" class=\"data row8 col0\" >-0.356362</td>\n",
+ " <td id=\"T_637af_row8_col1\" class=\"data row8 col1\" >-0.089227</td>\n",
+ " <td id=\"T_637af_row8_col2\" class=\"data row8 col2\" >0.609373</td>\n",
+ " <td id=\"T_637af_row8_col3\" class=\"data row8 col3\" >0.542382</td>\n",
+ " <td id=\"T_637af_row8_col4\" class=\"data row8 col4\" >-0.768681</td>\n",
+ " <td id=\"T_637af_row8_col5\" class=\"data row8 col5\" >-0.048074</td>\n",
+ " <td id=\"T_637af_row8_col6\" class=\"data row8 col6\" >2.015458</td>\n",
+ " <td id=\"T_637af_row8_col7\" class=\"data row8 col7\" >-1.552351</td>\n",
+ " <td id=\"T_637af_row8_col8\" class=\"data row8 col8\" >0.251552</td>\n",
+ " <td id=\"T_637af_row8_col9\" class=\"data row8 col9\" >1.459635</td>\n",
+ " <td id=\"T_637af_row8_col10\" class=\"data row8 col10\" >0.949707</td>\n",
+ " <td id=\"T_637af_row8_col11\" class=\"data row8 col11\" >0.339465</td>\n",
+ " <td id=\"T_637af_row8_col12\" class=\"data row8 col12\" >-0.001372</td>\n",
+ " <td id=\"T_637af_row8_col13\" class=\"data row8 col13\" >1.798589</td>\n",
+ " <td id=\"T_637af_row8_col14\" class=\"data row8 col14\" >1.559163</td>\n",
+ " <td id=\"T_637af_row8_col15\" class=\"data row8 col15\" >0.231783</td>\n",
+ " <td id=\"T_637af_row8_col16\" class=\"data row8 col16\" >0.423141</td>\n",
+ " <td id=\"T_637af_row8_col17\" class=\"data row8 col17\" >-0.310530</td>\n",
+ " <td id=\"T_637af_row8_col18\" class=\"data row8 col18\" >0.353795</td>\n",
+ " <td id=\"T_637af_row8_col19\" class=\"data row8 col19\" >2.173336</td>\n",
+ " <td id=\"T_637af_row8_col20\" class=\"data row8 col20\" >-0.196247</td>\n",
+ " <td id=\"T_637af_row8_col21\" class=\"data row8 col21\" >-0.375636</td>\n",
+ " <td id=\"T_637af_row8_col22\" class=\"data row8 col22\" >-0.858221</td>\n",
+ " <td id=\"T_637af_row8_col23\" class=\"data row8 col23\" >0.258410</td>\n",
+ " <td id=\"T_637af_row8_col24\" class=\"data row8 col24\" >0.656430</td>\n",
+ " <td id=\"T_637af_row8_col25\" class=\"data row8 col25\" >0.960819</td>\n",
+ " <td id=\"T_637af_row8_col26\" class=\"data row8 col26\" >1.137893</td>\n",
+ " <td id=\"T_637af_row8_col27\" class=\"data row8 col27\" >1.553405</td>\n",
+ " <td id=\"T_637af_row8_col28\" class=\"data row8 col28\" >0.038981</td>\n",
+ " <td id=\"T_637af_row8_col29\" class=\"data row8 col29\" >-0.632038</td>\n",
+ " <td id=\"T_637af_row8_col30\" class=\"data row8 col30\" >-0.132009</td>\n",
+ " <td id=\"T_637af_row8_col31\" class=\"data row8 col31\" >-1.834997</td>\n",
+ " <td id=\"T_637af_row8_col32\" class=\"data row8 col32\" >-0.242576</td>\n",
+ " <td id=\"T_637af_row8_col33\" class=\"data row8 col33\" >-0.297879</td>\n",
+ " <td id=\"T_637af_row8_col34\" class=\"data row8 col34\" >-0.441559</td>\n",
+ " <td id=\"T_637af_row8_col35\" class=\"data row8 col35\" >-0.769691</td>\n",
+ " <td id=\"T_637af_row8_col36\" class=\"data row8 col36\" >0.224077</td>\n",
+ " <td id=\"T_637af_row8_col37\" class=\"data row8 col37\" >-0.153009</td>\n",
+ " <td id=\"T_637af_row8_col38\" class=\"data row8 col38\" >0.519526</td>\n",
+ " <td id=\"T_637af_row8_col39\" class=\"data row8 col39\" >-0.680188</td>\n",
+ " <td id=\"T_637af_row8_col40\" class=\"data row8 col40\" >0.535851</td>\n",
+ " <td id=\"T_637af_row8_col41\" class=\"data row8 col41\" >0.671496</td>\n",
+ " <td id=\"T_637af_row8_col42\" class=\"data row8 col42\" >-0.183064</td>\n",
+ " <td id=\"T_637af_row8_col43\" class=\"data row8 col43\" >0.301234</td>\n",
+ " <td id=\"T_637af_row8_col44\" class=\"data row8 col44\" >1.288256</td>\n",
+ " <td id=\"T_637af_row8_col45\" class=\"data row8 col45\" >-2.478240</td>\n",
+ " <td id=\"T_637af_row8_col46\" class=\"data row8 col46\" >-0.360403</td>\n",
+ " <td id=\"T_637af_row8_col47\" class=\"data row8 col47\" >0.424067</td>\n",
+ " <td id=\"T_637af_row8_col48\" class=\"data row8 col48\" >-0.834659</td>\n",
+ " <td id=\"T_637af_row8_col49\" class=\"data row8 col49\" >-0.128464</td>\n",
+ " <td id=\"T_637af_row8_col50\" class=\"data row8 col50\" >-0.489013</td>\n",
+ " <td id=\"T_637af_row8_col51\" class=\"data row8 col51\" >-0.014888</td>\n",
+ " <td id=\"T_637af_row8_col52\" class=\"data row8 col52\" >-1.461230</td>\n",
+ " <td id=\"T_637af_row8_col53\" class=\"data row8 col53\" >-1.435223</td>\n",
+ " <td id=\"T_637af_row8_col54\" class=\"data row8 col54\" >-1.319802</td>\n",
+ " <td id=\"T_637af_row8_col55\" class=\"data row8 col55\" >1.083675</td>\n",
+ " <td id=\"T_637af_row8_col56\" class=\"data row8 col56\" >0.979140</td>\n",
+ " <td id=\"T_637af_row8_col57\" class=\"data row8 col57\" >-0.375291</td>\n",
+ " <td id=\"T_637af_row8_col58\" class=\"data row8 col58\" >1.110189</td>\n",
+ " <td id=\"T_637af_row8_col59\" class=\"data row8 col59\" >-1.011351</td>\n",
+ " <td id=\"T_637af_row8_col60\" class=\"data row8 col60\" >0.587886</td>\n",
+ " <td id=\"T_637af_row8_col61\" class=\"data row8 col61\" >-0.822775</td>\n",
+ " <td id=\"T_637af_row8_col62\" class=\"data row8 col62\" >-1.183865</td>\n",
+ " <td id=\"T_637af_row8_col63\" class=\"data row8 col63\" >1.455173</td>\n",
+ " <td id=\"T_637af_row8_col64\" class=\"data row8 col64\" >1.134328</td>\n",
+ " <td id=\"T_637af_row8_col65\" class=\"data row8 col65\" >0.239403</td>\n",
+ " <td id=\"T_637af_row8_col66\" class=\"data row8 col66\" >-0.837991</td>\n",
+ " <td id=\"T_637af_row8_col67\" class=\"data row8 col67\" >-1.130932</td>\n",
+ " <td id=\"T_637af_row8_col68\" class=\"data row8 col68\" >0.783168</td>\n",
+ " <td id=\"T_637af_row8_col69\" class=\"data row8 col69\" >1.845520</td>\n",
+ " <td id=\"T_637af_row8_col70\" class=\"data row8 col70\" >1.437072</td>\n",
+ " <td id=\"T_637af_row8_col71\" class=\"data row8 col71\" >-1.198443</td>\n",
+ " <td id=\"T_637af_row8_col72\" class=\"data row8 col72\" >1.379098</td>\n",
+ " <td id=\"T_637af_row8_col73\" class=\"data row8 col73\" >2.129113</td>\n",
+ " <td id=\"T_637af_row8_col74\" class=\"data row8 col74\" >0.260096</td>\n",
+ " <td id=\"T_637af_row8_col75\" class=\"data row8 col75\" >-0.011975</td>\n",
+ " <td id=\"T_637af_row8_col76\" class=\"data row8 col76\" >0.043302</td>\n",
+ " <td id=\"T_637af_row8_col77\" class=\"data row8 col77\" >0.722941</td>\n",
+ " <td id=\"T_637af_row8_col78\" class=\"data row8 col78\" >1.028152</td>\n",
+ " <td id=\"T_637af_row8_col79\" class=\"data row8 col79\" >-0.235806</td>\n",
+ " <td id=\"T_637af_row8_col80\" class=\"data row8 col80\" >1.145245</td>\n",
+ " <td id=\"T_637af_row8_col81\" class=\"data row8 col81\" >-1.359598</td>\n",
+ " <td id=\"T_637af_row8_col82\" class=\"data row8 col82\" >0.232189</td>\n",
+ " <td id=\"T_637af_row8_col83\" class=\"data row8 col83\" >0.503712</td>\n",
+ " <td id=\"T_637af_row8_col84\" class=\"data row8 col84\" >-0.614264</td>\n",
+ " <td id=\"T_637af_row8_col85\" class=\"data row8 col85\" >-0.530606</td>\n",
+ " <td id=\"T_637af_row8_col86\" class=\"data row8 col86\" >-2.435803</td>\n",
+ " <td id=\"T_637af_row8_col87\" class=\"data row8 col87\" >-0.255238</td>\n",
+ " <td id=\"T_637af_row8_col88\" class=\"data row8 col88\" >-0.064423</td>\n",
+ " <td id=\"T_637af_row8_col89\" class=\"data row8 col89\" >0.784643</td>\n",
+ " <td id=\"T_637af_row8_col90\" class=\"data row8 col90\" >0.256346</td>\n",
+ " <td id=\"T_637af_row8_col91\" class=\"data row8 col91\" >0.128023</td>\n",
+ " <td id=\"T_637af_row8_col92\" class=\"data row8 col92\" >1.414103</td>\n",
+ " <td id=\"T_637af_row8_col93\" class=\"data row8 col93\" >-1.118659</td>\n",
+ " <td id=\"T_637af_row8_col94\" class=\"data row8 col94\" >0.877353</td>\n",
+ " <td id=\"T_637af_row8_col95\" class=\"data row8 col95\" >0.500561</td>\n",
+ " <td id=\"T_637af_row8_col96\" class=\"data row8 col96\" >0.463651</td>\n",
+ " <td id=\"T_637af_row8_col97\" class=\"data row8 col97\" >-2.034512</td>\n",
+ " <td id=\"T_637af_row8_col98\" class=\"data row8 col98\" >-0.981683</td>\n",
+ " <td id=\"T_637af_row8_col99\" class=\"data row8 col99\" >-0.691944</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n",
+ " <td id=\"T_637af_row9_col0\" class=\"data row9 col0\" >-1.113376</td>\n",
+ " <td id=\"T_637af_row9_col1\" class=\"data row9 col1\" >-1.169402</td>\n",
+ " <td id=\"T_637af_row9_col2\" class=\"data row9 col2\" >0.680539</td>\n",
+ " <td id=\"T_637af_row9_col3\" class=\"data row9 col3\" >-1.534212</td>\n",
+ " <td id=\"T_637af_row9_col4\" class=\"data row9 col4\" >1.653817</td>\n",
+ " <td id=\"T_637af_row9_col5\" class=\"data row9 col5\" >-1.295181</td>\n",
+ " <td id=\"T_637af_row9_col6\" class=\"data row9 col6\" >-0.566826</td>\n",
+ " <td id=\"T_637af_row9_col7\" class=\"data row9 col7\" >0.477014</td>\n",
+ " <td id=\"T_637af_row9_col8\" class=\"data row9 col8\" >1.413371</td>\n",
+ " <td id=\"T_637af_row9_col9\" class=\"data row9 col9\" >0.517105</td>\n",
+ " <td id=\"T_637af_row9_col10\" class=\"data row9 col10\" >1.401153</td>\n",
+ " <td id=\"T_637af_row9_col11\" class=\"data row9 col11\" >-0.872685</td>\n",
+ " <td id=\"T_637af_row9_col12\" class=\"data row9 col12\" >0.830957</td>\n",
+ " <td id=\"T_637af_row9_col13\" class=\"data row9 col13\" >0.181507</td>\n",
+ " <td id=\"T_637af_row9_col14\" class=\"data row9 col14\" >-0.145616</td>\n",
+ " <td id=\"T_637af_row9_col15\" class=\"data row9 col15\" >0.694592</td>\n",
+ " <td id=\"T_637af_row9_col16\" class=\"data row9 col16\" >-0.751208</td>\n",
+ " <td id=\"T_637af_row9_col17\" class=\"data row9 col17\" >0.324444</td>\n",
+ " <td id=\"T_637af_row9_col18\" class=\"data row9 col18\" >0.681973</td>\n",
+ " <td id=\"T_637af_row9_col19\" class=\"data row9 col19\" >-0.054972</td>\n",
+ " <td id=\"T_637af_row9_col20\" class=\"data row9 col20\" >0.917776</td>\n",
+ " <td id=\"T_637af_row9_col21\" class=\"data row9 col21\" >-1.024810</td>\n",
+ " <td id=\"T_637af_row9_col22\" class=\"data row9 col22\" >-0.206446</td>\n",
+ " <td id=\"T_637af_row9_col23\" class=\"data row9 col23\" >-0.600113</td>\n",
+ " <td id=\"T_637af_row9_col24\" class=\"data row9 col24\" >0.852805</td>\n",
+ " <td id=\"T_637af_row9_col25\" class=\"data row9 col25\" >1.455109</td>\n",
+ " <td id=\"T_637af_row9_col26\" class=\"data row9 col26\" >-0.079769</td>\n",
+ " <td id=\"T_637af_row9_col27\" class=\"data row9 col27\" >0.076076</td>\n",
+ " <td id=\"T_637af_row9_col28\" class=\"data row9 col28\" >0.207699</td>\n",
+ " <td id=\"T_637af_row9_col29\" class=\"data row9 col29\" >-1.850458</td>\n",
+ " <td id=\"T_637af_row9_col30\" class=\"data row9 col30\" >-0.124124</td>\n",
+ " <td id=\"T_637af_row9_col31\" class=\"data row9 col31\" >-0.610871</td>\n",
+ " <td id=\"T_637af_row9_col32\" class=\"data row9 col32\" >-0.883362</td>\n",
+ " <td id=\"T_637af_row9_col33\" class=\"data row9 col33\" >0.219049</td>\n",
+ " <td id=\"T_637af_row9_col34\" class=\"data row9 col34\" >-0.685094</td>\n",
+ " <td id=\"T_637af_row9_col35\" class=\"data row9 col35\" >-0.645330</td>\n",
+ " <td id=\"T_637af_row9_col36\" class=\"data row9 col36\" >-0.242805</td>\n",
+ " <td id=\"T_637af_row9_col37\" class=\"data row9 col37\" >-0.775602</td>\n",
+ " <td id=\"T_637af_row9_col38\" class=\"data row9 col38\" >0.233070</td>\n",
+ " <td id=\"T_637af_row9_col39\" class=\"data row9 col39\" >2.422642</td>\n",
+ " <td id=\"T_637af_row9_col40\" class=\"data row9 col40\" >-1.423040</td>\n",
+ " <td id=\"T_637af_row9_col41\" class=\"data row9 col41\" >-0.582421</td>\n",
+ " <td id=\"T_637af_row9_col42\" class=\"data row9 col42\" >0.968304</td>\n",
+ " <td id=\"T_637af_row9_col43\" class=\"data row9 col43\" >-0.701025</td>\n",
+ " <td id=\"T_637af_row9_col44\" class=\"data row9 col44\" >-0.167850</td>\n",
+ " <td id=\"T_637af_row9_col45\" class=\"data row9 col45\" >0.277264</td>\n",
+ " <td id=\"T_637af_row9_col46\" class=\"data row9 col46\" >1.301231</td>\n",
+ " <td id=\"T_637af_row9_col47\" class=\"data row9 col47\" >0.301205</td>\n",
+ " <td id=\"T_637af_row9_col48\" class=\"data row9 col48\" >-3.081249</td>\n",
+ " <td id=\"T_637af_row9_col49\" class=\"data row9 col49\" >-0.562868</td>\n",
+ " <td id=\"T_637af_row9_col50\" class=\"data row9 col50\" >0.192944</td>\n",
+ " <td id=\"T_637af_row9_col51\" class=\"data row9 col51\" >-0.664592</td>\n",
+ " <td id=\"T_637af_row9_col52\" class=\"data row9 col52\" >0.565686</td>\n",
+ " <td id=\"T_637af_row9_col53\" class=\"data row9 col53\" >0.190913</td>\n",
+ " <td id=\"T_637af_row9_col54\" class=\"data row9 col54\" >-0.841858</td>\n",
+ " <td id=\"T_637af_row9_col55\" class=\"data row9 col55\" >-1.856545</td>\n",
+ " <td id=\"T_637af_row9_col56\" class=\"data row9 col56\" >-1.022777</td>\n",
+ " <td id=\"T_637af_row9_col57\" class=\"data row9 col57\" >1.295968</td>\n",
+ " <td id=\"T_637af_row9_col58\" class=\"data row9 col58\" >0.451921</td>\n",
+ " <td id=\"T_637af_row9_col59\" class=\"data row9 col59\" >0.659955</td>\n",
+ " <td id=\"T_637af_row9_col60\" class=\"data row9 col60\" >0.065818</td>\n",
+ " <td id=\"T_637af_row9_col61\" class=\"data row9 col61\" >-0.319586</td>\n",
+ " <td id=\"T_637af_row9_col62\" class=\"data row9 col62\" >0.253495</td>\n",
+ " <td id=\"T_637af_row9_col63\" class=\"data row9 col63\" >-1.144646</td>\n",
+ " <td id=\"T_637af_row9_col64\" class=\"data row9 col64\" >-0.483404</td>\n",
+ " <td id=\"T_637af_row9_col65\" class=\"data row9 col65\" >0.555902</td>\n",
+ " <td id=\"T_637af_row9_col66\" class=\"data row9 col66\" >0.807069</td>\n",
+ " <td id=\"T_637af_row9_col67\" class=\"data row9 col67\" >0.714196</td>\n",
+ " <td id=\"T_637af_row9_col68\" class=\"data row9 col68\" >0.661196</td>\n",
+ " <td id=\"T_637af_row9_col69\" class=\"data row9 col69\" >0.053667</td>\n",
+ " <td id=\"T_637af_row9_col70\" class=\"data row9 col70\" >0.346833</td>\n",
+ " <td id=\"T_637af_row9_col71\" class=\"data row9 col71\" >-1.288977</td>\n",
+ " <td id=\"T_637af_row9_col72\" class=\"data row9 col72\" >-0.386734</td>\n",
+ " <td id=\"T_637af_row9_col73\" class=\"data row9 col73\" >-1.262127</td>\n",
+ " <td id=\"T_637af_row9_col74\" class=\"data row9 col74\" >0.477495</td>\n",
+ " <td id=\"T_637af_row9_col75\" class=\"data row9 col75\" >-0.494034</td>\n",
+ " <td id=\"T_637af_row9_col76\" class=\"data row9 col76\" >-0.911414</td>\n",
+ " <td id=\"T_637af_row9_col77\" class=\"data row9 col77\" >1.152963</td>\n",
+ " <td id=\"T_637af_row9_col78\" class=\"data row9 col78\" >-0.342365</td>\n",
+ " <td id=\"T_637af_row9_col79\" class=\"data row9 col79\" >-0.160187</td>\n",
+ " <td id=\"T_637af_row9_col80\" class=\"data row9 col80\" >0.470054</td>\n",
+ " <td id=\"T_637af_row9_col81\" class=\"data row9 col81\" >-0.853063</td>\n",
+ " <td id=\"T_637af_row9_col82\" class=\"data row9 col82\" >-1.387949</td>\n",
+ " <td id=\"T_637af_row9_col83\" class=\"data row9 col83\" >-0.257257</td>\n",
+ " <td id=\"T_637af_row9_col84\" class=\"data row9 col84\" >-1.030690</td>\n",
+ " <td id=\"T_637af_row9_col85\" class=\"data row9 col85\" >-0.110210</td>\n",
+ " <td id=\"T_637af_row9_col86\" class=\"data row9 col86\" >0.328911</td>\n",
+ " <td id=\"T_637af_row9_col87\" class=\"data row9 col87\" >-0.555923</td>\n",
+ " <td id=\"T_637af_row9_col88\" class=\"data row9 col88\" >0.987713</td>\n",
+ " <td id=\"T_637af_row9_col89\" class=\"data row9 col89\" >-0.501957</td>\n",
+ " <td id=\"T_637af_row9_col90\" class=\"data row9 col90\" >2.069887</td>\n",
+ " <td id=\"T_637af_row9_col91\" class=\"data row9 col91\" >-0.067503</td>\n",
+ " <td id=\"T_637af_row9_col92\" class=\"data row9 col92\" >0.316029</td>\n",
+ " <td id=\"T_637af_row9_col93\" class=\"data row9 col93\" >-1.506232</td>\n",
+ " <td id=\"T_637af_row9_col94\" class=\"data row9 col94\" >2.201621</td>\n",
+ " <td id=\"T_637af_row9_col95\" class=\"data row9 col95\" >0.492097</td>\n",
+ " <td id=\"T_637af_row9_col96\" class=\"data row9 col96\" >-0.085193</td>\n",
+ " <td id=\"T_637af_row9_col97\" class=\"data row9 col97\" >-0.977822</td>\n",
+ " <td id=\"T_637af_row9_col98\" class=\"data row9 col98\" >1.039147</td>\n",
+ " <td id=\"T_637af_row9_col99\" class=\"data row9 col99\" >-0.653932</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row10\" class=\"row_heading level0 row10\" >10</th>\n",
+ " <td id=\"T_637af_row10_col0\" class=\"data row10 col0\" >-0.405638</td>\n",
+ " <td id=\"T_637af_row10_col1\" class=\"data row10 col1\" >-1.402027</td>\n",
+ " <td id=\"T_637af_row10_col2\" class=\"data row10 col2\" >-1.166242</td>\n",
+ " <td id=\"T_637af_row10_col3\" class=\"data row10 col3\" >1.306184</td>\n",
+ " <td id=\"T_637af_row10_col4\" class=\"data row10 col4\" >0.856283</td>\n",
+ " <td id=\"T_637af_row10_col5\" class=\"data row10 col5\" >-1.236170</td>\n",
+ " <td id=\"T_637af_row10_col6\" class=\"data row10 col6\" >-0.646721</td>\n",
+ " <td id=\"T_637af_row10_col7\" class=\"data row10 col7\" >-1.474064</td>\n",
+ " <td id=\"T_637af_row10_col8\" class=\"data row10 col8\" >0.082960</td>\n",
+ " <td id=\"T_637af_row10_col9\" class=\"data row10 col9\" >0.090310</td>\n",
+ " <td id=\"T_637af_row10_col10\" class=\"data row10 col10\" >-0.169977</td>\n",
+ " <td id=\"T_637af_row10_col11\" class=\"data row10 col11\" >0.406345</td>\n",
+ " <td id=\"T_637af_row10_col12\" class=\"data row10 col12\" >0.915427</td>\n",
+ " <td id=\"T_637af_row10_col13\" class=\"data row10 col13\" >-0.974503</td>\n",
+ " <td id=\"T_637af_row10_col14\" class=\"data row10 col14\" >0.271637</td>\n",
+ " <td id=\"T_637af_row10_col15\" class=\"data row10 col15\" >1.539184</td>\n",
+ " <td id=\"T_637af_row10_col16\" class=\"data row10 col16\" >-0.098866</td>\n",
+ " <td id=\"T_637af_row10_col17\" class=\"data row10 col17\" >-0.525149</td>\n",
+ " <td id=\"T_637af_row10_col18\" class=\"data row10 col18\" >1.063933</td>\n",
+ " <td id=\"T_637af_row10_col19\" class=\"data row10 col19\" >0.085827</td>\n",
+ " <td id=\"T_637af_row10_col20\" class=\"data row10 col20\" >-0.129622</td>\n",
+ " <td id=\"T_637af_row10_col21\" class=\"data row10 col21\" >0.947959</td>\n",
+ " <td id=\"T_637af_row10_col22\" class=\"data row10 col22\" >-0.072496</td>\n",
+ " <td id=\"T_637af_row10_col23\" class=\"data row10 col23\" >-0.237592</td>\n",
+ " <td id=\"T_637af_row10_col24\" class=\"data row10 col24\" >0.012549</td>\n",
+ " <td id=\"T_637af_row10_col25\" class=\"data row10 col25\" >1.065761</td>\n",
+ " <td id=\"T_637af_row10_col26\" class=\"data row10 col26\" >0.996596</td>\n",
+ " <td id=\"T_637af_row10_col27\" class=\"data row10 col27\" >-0.172481</td>\n",
+ " <td id=\"T_637af_row10_col28\" class=\"data row10 col28\" >2.583139</td>\n",
+ " <td id=\"T_637af_row10_col29\" class=\"data row10 col29\" >-0.028578</td>\n",
+ " <td id=\"T_637af_row10_col30\" class=\"data row10 col30\" >-0.254856</td>\n",
+ " <td id=\"T_637af_row10_col31\" class=\"data row10 col31\" >1.328794</td>\n",
+ " <td id=\"T_637af_row10_col32\" class=\"data row10 col32\" >-1.592951</td>\n",
+ " <td id=\"T_637af_row10_col33\" class=\"data row10 col33\" >2.434350</td>\n",
+ " <td id=\"T_637af_row10_col34\" class=\"data row10 col34\" >-0.341500</td>\n",
+ " <td id=\"T_637af_row10_col35\" class=\"data row10 col35\" >-0.307719</td>\n",
+ " <td id=\"T_637af_row10_col36\" class=\"data row10 col36\" >-1.333273</td>\n",
+ " <td id=\"T_637af_row10_col37\" class=\"data row10 col37\" >-1.100845</td>\n",
+ " <td id=\"T_637af_row10_col38\" class=\"data row10 col38\" >0.209097</td>\n",
+ " <td id=\"T_637af_row10_col39\" class=\"data row10 col39\" >1.734777</td>\n",
+ " <td id=\"T_637af_row10_col40\" class=\"data row10 col40\" >0.639632</td>\n",
+ " <td id=\"T_637af_row10_col41\" class=\"data row10 col41\" >0.424779</td>\n",
+ " <td id=\"T_637af_row10_col42\" class=\"data row10 col42\" >-0.129327</td>\n",
+ " <td id=\"T_637af_row10_col43\" class=\"data row10 col43\" >0.905029</td>\n",
+ " <td id=\"T_637af_row10_col44\" class=\"data row10 col44\" >-0.482909</td>\n",
+ " <td id=\"T_637af_row10_col45\" class=\"data row10 col45\" >1.731628</td>\n",
+ " <td id=\"T_637af_row10_col46\" class=\"data row10 col46\" >-2.783425</td>\n",
+ " <td id=\"T_637af_row10_col47\" class=\"data row10 col47\" >-0.333677</td>\n",
+ " <td id=\"T_637af_row10_col48\" class=\"data row10 col48\" >-0.110895</td>\n",
+ " <td id=\"T_637af_row10_col49\" class=\"data row10 col49\" >1.212636</td>\n",
+ " <td id=\"T_637af_row10_col50\" class=\"data row10 col50\" >-0.208412</td>\n",
+ " <td id=\"T_637af_row10_col51\" class=\"data row10 col51\" >0.427117</td>\n",
+ " <td id=\"T_637af_row10_col52\" class=\"data row10 col52\" >1.348563</td>\n",
+ " <td id=\"T_637af_row10_col53\" class=\"data row10 col53\" >0.043859</td>\n",
+ " <td id=\"T_637af_row10_col54\" class=\"data row10 col54\" >1.772519</td>\n",
+ " <td id=\"T_637af_row10_col55\" class=\"data row10 col55\" >-1.416106</td>\n",
+ " <td id=\"T_637af_row10_col56\" class=\"data row10 col56\" >0.401155</td>\n",
+ " <td id=\"T_637af_row10_col57\" class=\"data row10 col57\" >0.807157</td>\n",
+ " <td id=\"T_637af_row10_col58\" class=\"data row10 col58\" >0.303427</td>\n",
+ " <td id=\"T_637af_row10_col59\" class=\"data row10 col59\" >-1.246288</td>\n",
+ " <td id=\"T_637af_row10_col60\" class=\"data row10 col60\" >0.178774</td>\n",
+ " <td id=\"T_637af_row10_col61\" class=\"data row10 col61\" >-0.066126</td>\n",
+ " <td id=\"T_637af_row10_col62\" class=\"data row10 col62\" >-1.862288</td>\n",
+ " <td id=\"T_637af_row10_col63\" class=\"data row10 col63\" >1.241295</td>\n",
+ " <td id=\"T_637af_row10_col64\" class=\"data row10 col64\" >0.377021</td>\n",
+ " <td id=\"T_637af_row10_col65\" class=\"data row10 col65\" >-0.822320</td>\n",
+ " <td id=\"T_637af_row10_col66\" class=\"data row10 col66\" >-0.749014</td>\n",
+ " <td id=\"T_637af_row10_col67\" class=\"data row10 col67\" >1.463652</td>\n",
+ " <td id=\"T_637af_row10_col68\" class=\"data row10 col68\" >1.602268</td>\n",
+ " <td id=\"T_637af_row10_col69\" class=\"data row10 col69\" >-1.043877</td>\n",
+ " <td id=\"T_637af_row10_col70\" class=\"data row10 col70\" >1.185290</td>\n",
+ " <td id=\"T_637af_row10_col71\" class=\"data row10 col71\" >-0.565783</td>\n",
+ " <td id=\"T_637af_row10_col72\" class=\"data row10 col72\" >-1.076879</td>\n",
+ " <td id=\"T_637af_row10_col73\" class=\"data row10 col73\" >1.360241</td>\n",
+ " <td id=\"T_637af_row10_col74\" class=\"data row10 col74\" >-0.121991</td>\n",
+ " <td id=\"T_637af_row10_col75\" class=\"data row10 col75\" >0.991043</td>\n",
+ " <td id=\"T_637af_row10_col76\" class=\"data row10 col76\" >1.007952</td>\n",
+ " <td id=\"T_637af_row10_col77\" class=\"data row10 col77\" >0.450185</td>\n",
+ " <td id=\"T_637af_row10_col78\" class=\"data row10 col78\" >-0.744376</td>\n",
+ " <td id=\"T_637af_row10_col79\" class=\"data row10 col79\" >1.388876</td>\n",
+ " <td id=\"T_637af_row10_col80\" class=\"data row10 col80\" >-0.316847</td>\n",
+ " <td id=\"T_637af_row10_col81\" class=\"data row10 col81\" >-0.841655</td>\n",
+ " <td id=\"T_637af_row10_col82\" class=\"data row10 col82\" >-1.056842</td>\n",
+ " <td id=\"T_637af_row10_col83\" class=\"data row10 col83\" >-0.500226</td>\n",
+ " <td id=\"T_637af_row10_col84\" class=\"data row10 col84\" >0.096959</td>\n",
+ " <td id=\"T_637af_row10_col85\" class=\"data row10 col85\" >1.176896</td>\n",
+ " <td id=\"T_637af_row10_col86\" class=\"data row10 col86\" >-2.939652</td>\n",
+ " <td id=\"T_637af_row10_col87\" class=\"data row10 col87\" >1.792213</td>\n",
+ " <td id=\"T_637af_row10_col88\" class=\"data row10 col88\" >0.316340</td>\n",
+ " <td id=\"T_637af_row10_col89\" class=\"data row10 col89\" >0.303218</td>\n",
+ " <td id=\"T_637af_row10_col90\" class=\"data row10 col90\" >1.024967</td>\n",
+ " <td id=\"T_637af_row10_col91\" class=\"data row10 col91\" >-0.590871</td>\n",
+ " <td id=\"T_637af_row10_col92\" class=\"data row10 col92\" >-0.453326</td>\n",
+ " <td id=\"T_637af_row10_col93\" class=\"data row10 col93\" >-0.795981</td>\n",
+ " <td id=\"T_637af_row10_col94\" class=\"data row10 col94\" >-0.393301</td>\n",
+ " <td id=\"T_637af_row10_col95\" class=\"data row10 col95\" >-0.374372</td>\n",
+ " <td id=\"T_637af_row10_col96\" class=\"data row10 col96\" >-1.270199</td>\n",
+ " <td id=\"T_637af_row10_col97\" class=\"data row10 col97\" >1.618372</td>\n",
+ " <td id=\"T_637af_row10_col98\" class=\"data row10 col98\" >1.197727</td>\n",
+ " <td id=\"T_637af_row10_col99\" class=\"data row10 col99\" >-0.914863</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row11\" class=\"row_heading level0 row11\" >11</th>\n",
+ " <td id=\"T_637af_row11_col0\" class=\"data row11 col0\" >-0.625210</td>\n",
+ " <td id=\"T_637af_row11_col1\" class=\"data row11 col1\" >0.288911</td>\n",
+ " <td id=\"T_637af_row11_col2\" class=\"data row11 col2\" >0.288374</td>\n",
+ " <td id=\"T_637af_row11_col3\" class=\"data row11 col3\" >-1.372667</td>\n",
+ " <td id=\"T_637af_row11_col4\" class=\"data row11 col4\" >-0.591395</td>\n",
+ " <td id=\"T_637af_row11_col5\" class=\"data row11 col5\" >-0.478942</td>\n",
+ " <td id=\"T_637af_row11_col6\" class=\"data row11 col6\" >1.335664</td>\n",
+ " <td id=\"T_637af_row11_col7\" class=\"data row11 col7\" >-0.459855</td>\n",
+ " <td id=\"T_637af_row11_col8\" class=\"data row11 col8\" >-1.615975</td>\n",
+ " <td id=\"T_637af_row11_col9\" class=\"data row11 col9\" >-1.189676</td>\n",
+ " <td id=\"T_637af_row11_col10\" class=\"data row11 col10\" >0.374767</td>\n",
+ " <td id=\"T_637af_row11_col11\" class=\"data row11 col11\" >-2.488733</td>\n",
+ " <td id=\"T_637af_row11_col12\" class=\"data row11 col12\" >0.586656</td>\n",
+ " <td id=\"T_637af_row11_col13\" class=\"data row11 col13\" >-1.422008</td>\n",
+ " <td id=\"T_637af_row11_col14\" class=\"data row11 col14\" >0.496030</td>\n",
+ " <td id=\"T_637af_row11_col15\" class=\"data row11 col15\" >1.911128</td>\n",
+ " <td id=\"T_637af_row11_col16\" class=\"data row11 col16\" >-0.560660</td>\n",
+ " <td id=\"T_637af_row11_col17\" class=\"data row11 col17\" >-0.499614</td>\n",
+ " <td id=\"T_637af_row11_col18\" class=\"data row11 col18\" >-0.372171</td>\n",
+ " <td id=\"T_637af_row11_col19\" class=\"data row11 col19\" >-1.833069</td>\n",
+ " <td id=\"T_637af_row11_col20\" class=\"data row11 col20\" >0.237124</td>\n",
+ " <td id=\"T_637af_row11_col21\" class=\"data row11 col21\" >-0.944446</td>\n",
+ " <td id=\"T_637af_row11_col22\" class=\"data row11 col22\" >0.912140</td>\n",
+ " <td id=\"T_637af_row11_col23\" class=\"data row11 col23\" >0.359790</td>\n",
+ " <td id=\"T_637af_row11_col24\" class=\"data row11 col24\" >-1.359235</td>\n",
+ " <td id=\"T_637af_row11_col25\" class=\"data row11 col25\" >0.166966</td>\n",
+ " <td id=\"T_637af_row11_col26\" class=\"data row11 col26\" >-0.047107</td>\n",
+ " <td id=\"T_637af_row11_col27\" class=\"data row11 col27\" >-0.279789</td>\n",
+ " <td id=\"T_637af_row11_col28\" class=\"data row11 col28\" >-0.594454</td>\n",
+ " <td id=\"T_637af_row11_col29\" class=\"data row11 col29\" >-0.739013</td>\n",
+ " <td id=\"T_637af_row11_col30\" class=\"data row11 col30\" >-1.527645</td>\n",
+ " <td id=\"T_637af_row11_col31\" class=\"data row11 col31\" >0.401668</td>\n",
+ " <td id=\"T_637af_row11_col32\" class=\"data row11 col32\" >1.791252</td>\n",
+ " <td id=\"T_637af_row11_col33\" class=\"data row11 col33\" >-2.774848</td>\n",
+ " <td id=\"T_637af_row11_col34\" class=\"data row11 col34\" >0.523873</td>\n",
+ " <td id=\"T_637af_row11_col35\" class=\"data row11 col35\" >2.207585</td>\n",
+ " <td id=\"T_637af_row11_col36\" class=\"data row11 col36\" >0.488999</td>\n",
+ " <td id=\"T_637af_row11_col37\" class=\"data row11 col37\" >-0.339283</td>\n",
+ " <td id=\"T_637af_row11_col38\" class=\"data row11 col38\" >0.131711</td>\n",
+ " <td id=\"T_637af_row11_col39\" class=\"data row11 col39\" >0.018409</td>\n",
+ " <td id=\"T_637af_row11_col40\" class=\"data row11 col40\" >1.186551</td>\n",
+ " <td id=\"T_637af_row11_col41\" class=\"data row11 col41\" >-0.424318</td>\n",
+ " <td id=\"T_637af_row11_col42\" class=\"data row11 col42\" >1.554994</td>\n",
+ " <td id=\"T_637af_row11_col43\" class=\"data row11 col43\" >-0.205917</td>\n",
+ " <td id=\"T_637af_row11_col44\" class=\"data row11 col44\" >-0.934975</td>\n",
+ " <td id=\"T_637af_row11_col45\" class=\"data row11 col45\" >0.654102</td>\n",
+ " <td id=\"T_637af_row11_col46\" class=\"data row11 col46\" >-1.227761</td>\n",
+ " <td id=\"T_637af_row11_col47\" class=\"data row11 col47\" >-0.461025</td>\n",
+ " <td id=\"T_637af_row11_col48\" class=\"data row11 col48\" >-0.421201</td>\n",
+ " <td id=\"T_637af_row11_col49\" class=\"data row11 col49\" >-0.058615</td>\n",
+ " <td id=\"T_637af_row11_col50\" class=\"data row11 col50\" >-0.584563</td>\n",
+ " <td id=\"T_637af_row11_col51\" class=\"data row11 col51\" >0.336913</td>\n",
+ " <td id=\"T_637af_row11_col52\" class=\"data row11 col52\" >-0.477102</td>\n",
+ " <td id=\"T_637af_row11_col53\" class=\"data row11 col53\" >-1.381463</td>\n",
+ " <td id=\"T_637af_row11_col54\" class=\"data row11 col54\" >0.757745</td>\n",
+ " <td id=\"T_637af_row11_col55\" class=\"data row11 col55\" >-0.268968</td>\n",
+ " <td id=\"T_637af_row11_col56\" class=\"data row11 col56\" >0.034870</td>\n",
+ " <td id=\"T_637af_row11_col57\" class=\"data row11 col57\" >1.231686</td>\n",
+ " <td id=\"T_637af_row11_col58\" class=\"data row11 col58\" >0.236600</td>\n",
+ " <td id=\"T_637af_row11_col59\" class=\"data row11 col59\" >1.234720</td>\n",
+ " <td id=\"T_637af_row11_col60\" class=\"data row11 col60\" >-0.040247</td>\n",
+ " <td id=\"T_637af_row11_col61\" class=\"data row11 col61\" >0.029582</td>\n",
+ " <td id=\"T_637af_row11_col62\" class=\"data row11 col62\" >1.034905</td>\n",
+ " <td id=\"T_637af_row11_col63\" class=\"data row11 col63\" >0.380204</td>\n",
+ " <td id=\"T_637af_row11_col64\" class=\"data row11 col64\" >-0.012108</td>\n",
+ " <td id=\"T_637af_row11_col65\" class=\"data row11 col65\" >-0.859511</td>\n",
+ " <td id=\"T_637af_row11_col66\" class=\"data row11 col66\" >-0.990340</td>\n",
+ " <td id=\"T_637af_row11_col67\" class=\"data row11 col67\" >-1.205172</td>\n",
+ " <td id=\"T_637af_row11_col68\" class=\"data row11 col68\" >-1.030178</td>\n",
+ " <td id=\"T_637af_row11_col69\" class=\"data row11 col69\" >0.426676</td>\n",
+ " <td id=\"T_637af_row11_col70\" class=\"data row11 col70\" >0.497796</td>\n",
+ " <td id=\"T_637af_row11_col71\" class=\"data row11 col71\" >-0.876808</td>\n",
+ " <td id=\"T_637af_row11_col72\" class=\"data row11 col72\" >0.957963</td>\n",
+ " <td id=\"T_637af_row11_col73\" class=\"data row11 col73\" >0.173016</td>\n",
+ " <td id=\"T_637af_row11_col74\" class=\"data row11 col74\" >0.131612</td>\n",
+ " <td id=\"T_637af_row11_col75\" class=\"data row11 col75\" >-1.003556</td>\n",
+ " <td id=\"T_637af_row11_col76\" class=\"data row11 col76\" >-1.069908</td>\n",
+ " <td id=\"T_637af_row11_col77\" class=\"data row11 col77\" >-1.799207</td>\n",
+ " <td id=\"T_637af_row11_col78\" class=\"data row11 col78\" >1.429598</td>\n",
+ " <td id=\"T_637af_row11_col79\" class=\"data row11 col79\" >-0.116015</td>\n",
+ " <td id=\"T_637af_row11_col80\" class=\"data row11 col80\" >-1.454980</td>\n",
+ " <td id=\"T_637af_row11_col81\" class=\"data row11 col81\" >0.261917</td>\n",
+ " <td id=\"T_637af_row11_col82\" class=\"data row11 col82\" >0.444412</td>\n",
+ " <td id=\"T_637af_row11_col83\" class=\"data row11 col83\" >0.273290</td>\n",
+ " <td id=\"T_637af_row11_col84\" class=\"data row11 col84\" >0.844115</td>\n",
+ " <td id=\"T_637af_row11_col85\" class=\"data row11 col85\" >0.218745</td>\n",
+ " <td id=\"T_637af_row11_col86\" class=\"data row11 col86\" >-1.033350</td>\n",
+ " <td id=\"T_637af_row11_col87\" class=\"data row11 col87\" >-1.188295</td>\n",
+ " <td id=\"T_637af_row11_col88\" class=\"data row11 col88\" >0.058373</td>\n",
+ " <td id=\"T_637af_row11_col89\" class=\"data row11 col89\" >0.800523</td>\n",
+ " <td id=\"T_637af_row11_col90\" class=\"data row11 col90\" >-1.627068</td>\n",
+ " <td id=\"T_637af_row11_col91\" class=\"data row11 col91\" >0.861651</td>\n",
+ " <td id=\"T_637af_row11_col92\" class=\"data row11 col92\" >0.871018</td>\n",
+ " <td id=\"T_637af_row11_col93\" class=\"data row11 col93\" >-0.003733</td>\n",
+ " <td id=\"T_637af_row11_col94\" class=\"data row11 col94\" >-0.243354</td>\n",
+ " <td id=\"T_637af_row11_col95\" class=\"data row11 col95\" >0.947296</td>\n",
+ " <td id=\"T_637af_row11_col96\" class=\"data row11 col96\" >0.509406</td>\n",
+ " <td id=\"T_637af_row11_col97\" class=\"data row11 col97\" >0.044546</td>\n",
+ " <td id=\"T_637af_row11_col98\" class=\"data row11 col98\" >0.266896</td>\n",
+ " <td id=\"T_637af_row11_col99\" class=\"data row11 col99\" >1.337165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row12\" class=\"row_heading level0 row12\" >12</th>\n",
+ " <td id=\"T_637af_row12_col0\" class=\"data row12 col0\" >0.699142</td>\n",
+ " <td id=\"T_637af_row12_col1\" class=\"data row12 col1\" >-1.928033</td>\n",
+ " <td id=\"T_637af_row12_col2\" class=\"data row12 col2\" >0.105363</td>\n",
+ " <td id=\"T_637af_row12_col3\" class=\"data row12 col3\" >1.042322</td>\n",
+ " <td id=\"T_637af_row12_col4\" class=\"data row12 col4\" >0.715206</td>\n",
+ " <td id=\"T_637af_row12_col5\" class=\"data row12 col5\" >-0.763783</td>\n",
+ " <td id=\"T_637af_row12_col6\" class=\"data row12 col6\" >0.098798</td>\n",
+ " <td id=\"T_637af_row12_col7\" class=\"data row12 col7\" >-1.157898</td>\n",
+ " <td id=\"T_637af_row12_col8\" class=\"data row12 col8\" >0.134105</td>\n",
+ " <td id=\"T_637af_row12_col9\" class=\"data row12 col9\" >0.042041</td>\n",
+ " <td id=\"T_637af_row12_col10\" class=\"data row12 col10\" >0.674826</td>\n",
+ " <td id=\"T_637af_row12_col11\" class=\"data row12 col11\" >0.165649</td>\n",
+ " <td id=\"T_637af_row12_col12\" class=\"data row12 col12\" >-1.622970</td>\n",
+ " <td id=\"T_637af_row12_col13\" class=\"data row12 col13\" >-3.131274</td>\n",
+ " <td id=\"T_637af_row12_col14\" class=\"data row12 col14\" >0.597649</td>\n",
+ " <td id=\"T_637af_row12_col15\" class=\"data row12 col15\" >-1.880331</td>\n",
+ " <td id=\"T_637af_row12_col16\" class=\"data row12 col16\" >0.663980</td>\n",
+ " <td id=\"T_637af_row12_col17\" class=\"data row12 col17\" >-0.256033</td>\n",
+ " <td id=\"T_637af_row12_col18\" class=\"data row12 col18\" >-1.524058</td>\n",
+ " <td id=\"T_637af_row12_col19\" class=\"data row12 col19\" >0.492799</td>\n",
+ " <td id=\"T_637af_row12_col20\" class=\"data row12 col20\" >0.221163</td>\n",
+ " <td id=\"T_637af_row12_col21\" class=\"data row12 col21\" >0.429622</td>\n",
+ " <td id=\"T_637af_row12_col22\" class=\"data row12 col22\" >-0.659584</td>\n",
+ " <td id=\"T_637af_row12_col23\" class=\"data row12 col23\" >1.264506</td>\n",
+ " <td id=\"T_637af_row12_col24\" class=\"data row12 col24\" >-0.032131</td>\n",
+ " <td id=\"T_637af_row12_col25\" class=\"data row12 col25\" >-2.114907</td>\n",
+ " <td id=\"T_637af_row12_col26\" class=\"data row12 col26\" >-0.264043</td>\n",
+ " <td id=\"T_637af_row12_col27\" class=\"data row12 col27\" >0.457835</td>\n",
+ " <td id=\"T_637af_row12_col28\" class=\"data row12 col28\" >-0.676837</td>\n",
+ " <td id=\"T_637af_row12_col29\" class=\"data row12 col29\" >-0.629003</td>\n",
+ " <td id=\"T_637af_row12_col30\" class=\"data row12 col30\" >0.489145</td>\n",
+ " <td id=\"T_637af_row12_col31\" class=\"data row12 col31\" >-0.551686</td>\n",
+ " <td id=\"T_637af_row12_col32\" class=\"data row12 col32\" >0.942622</td>\n",
+ " <td id=\"T_637af_row12_col33\" class=\"data row12 col33\" >-0.512043</td>\n",
+ " <td id=\"T_637af_row12_col34\" class=\"data row12 col34\" >-0.455893</td>\n",
+ " <td id=\"T_637af_row12_col35\" class=\"data row12 col35\" >0.021244</td>\n",
+ " <td id=\"T_637af_row12_col36\" class=\"data row12 col36\" >-0.178035</td>\n",
+ " <td id=\"T_637af_row12_col37\" class=\"data row12 col37\" >-2.498073</td>\n",
+ " <td id=\"T_637af_row12_col38\" class=\"data row12 col38\" >-0.171292</td>\n",
+ " <td id=\"T_637af_row12_col39\" class=\"data row12 col39\" >0.323510</td>\n",
+ " <td id=\"T_637af_row12_col40\" class=\"data row12 col40\" >-0.545163</td>\n",
+ " <td id=\"T_637af_row12_col41\" class=\"data row12 col41\" >-0.668909</td>\n",
+ " <td id=\"T_637af_row12_col42\" class=\"data row12 col42\" >-0.150031</td>\n",
+ " <td id=\"T_637af_row12_col43\" class=\"data row12 col43\" >0.521620</td>\n",
+ " <td id=\"T_637af_row12_col44\" class=\"data row12 col44\" >-0.428980</td>\n",
+ " <td id=\"T_637af_row12_col45\" class=\"data row12 col45\" >0.676463</td>\n",
+ " <td id=\"T_637af_row12_col46\" class=\"data row12 col46\" >0.369081</td>\n",
+ " <td id=\"T_637af_row12_col47\" class=\"data row12 col47\" >-0.724832</td>\n",
+ " <td id=\"T_637af_row12_col48\" class=\"data row12 col48\" >0.793542</td>\n",
+ " <td id=\"T_637af_row12_col49\" class=\"data row12 col49\" >1.237422</td>\n",
+ " <td id=\"T_637af_row12_col50\" class=\"data row12 col50\" >0.401275</td>\n",
+ " <td id=\"T_637af_row12_col51\" class=\"data row12 col51\" >2.141523</td>\n",
+ " <td id=\"T_637af_row12_col52\" class=\"data row12 col52\" >0.249012</td>\n",
+ " <td id=\"T_637af_row12_col53\" class=\"data row12 col53\" >0.486755</td>\n",
+ " <td id=\"T_637af_row12_col54\" class=\"data row12 col54\" >-0.163274</td>\n",
+ " <td id=\"T_637af_row12_col55\" class=\"data row12 col55\" >0.592222</td>\n",
+ " <td id=\"T_637af_row12_col56\" class=\"data row12 col56\" >-0.292600</td>\n",
+ " <td id=\"T_637af_row12_col57\" class=\"data row12 col57\" >-0.547168</td>\n",
+ " <td id=\"T_637af_row12_col58\" class=\"data row12 col58\" >0.619104</td>\n",
+ " <td id=\"T_637af_row12_col59\" class=\"data row12 col59\" >-0.013605</td>\n",
+ " <td id=\"T_637af_row12_col60\" class=\"data row12 col60\" >0.776734</td>\n",
+ " <td id=\"T_637af_row12_col61\" class=\"data row12 col61\" >0.131424</td>\n",
+ " <td id=\"T_637af_row12_col62\" class=\"data row12 col62\" >1.189480</td>\n",
+ " <td id=\"T_637af_row12_col63\" class=\"data row12 col63\" >-0.666317</td>\n",
+ " <td id=\"T_637af_row12_col64\" class=\"data row12 col64\" >-0.939036</td>\n",
+ " <td id=\"T_637af_row12_col65\" class=\"data row12 col65\" >1.105515</td>\n",
+ " <td id=\"T_637af_row12_col66\" class=\"data row12 col66\" >0.621452</td>\n",
+ " <td id=\"T_637af_row12_col67\" class=\"data row12 col67\" >1.586605</td>\n",
+ " <td id=\"T_637af_row12_col68\" class=\"data row12 col68\" >-0.760970</td>\n",
+ " <td id=\"T_637af_row12_col69\" class=\"data row12 col69\" >1.649646</td>\n",
+ " <td id=\"T_637af_row12_col70\" class=\"data row12 col70\" >0.283199</td>\n",
+ " <td id=\"T_637af_row12_col71\" class=\"data row12 col71\" >1.275812</td>\n",
+ " <td id=\"T_637af_row12_col72\" class=\"data row12 col72\" >-0.452012</td>\n",
+ " <td id=\"T_637af_row12_col73\" class=\"data row12 col73\" >0.301361</td>\n",
+ " <td id=\"T_637af_row12_col74\" class=\"data row12 col74\" >-0.976951</td>\n",
+ " <td id=\"T_637af_row12_col75\" class=\"data row12 col75\" >-0.268106</td>\n",
+ " <td id=\"T_637af_row12_col76\" class=\"data row12 col76\" >-0.079255</td>\n",
+ " <td id=\"T_637af_row12_col77\" class=\"data row12 col77\" >-1.258332</td>\n",
+ " <td id=\"T_637af_row12_col78\" class=\"data row12 col78\" >2.216658</td>\n",
+ " <td id=\"T_637af_row12_col79\" class=\"data row12 col79\" >-1.175988</td>\n",
+ " <td id=\"T_637af_row12_col80\" class=\"data row12 col80\" >-0.863497</td>\n",
+ " <td id=\"T_637af_row12_col81\" class=\"data row12 col81\" >-1.653022</td>\n",
+ " <td id=\"T_637af_row12_col82\" class=\"data row12 col82\" >-0.561514</td>\n",
+ " <td id=\"T_637af_row12_col83\" class=\"data row12 col83\" >0.450753</td>\n",
+ " <td id=\"T_637af_row12_col84\" class=\"data row12 col84\" >0.417200</td>\n",
+ " <td id=\"T_637af_row12_col85\" class=\"data row12 col85\" >0.094676</td>\n",
+ " <td id=\"T_637af_row12_col86\" class=\"data row12 col86\" >-2.231054</td>\n",
+ " <td id=\"T_637af_row12_col87\" class=\"data row12 col87\" >1.316862</td>\n",
+ " <td id=\"T_637af_row12_col88\" class=\"data row12 col88\" >-0.477441</td>\n",
+ " <td id=\"T_637af_row12_col89\" class=\"data row12 col89\" >0.646654</td>\n",
+ " <td id=\"T_637af_row12_col90\" class=\"data row12 col90\" >-0.200252</td>\n",
+ " <td id=\"T_637af_row12_col91\" class=\"data row12 col91\" >1.074354</td>\n",
+ " <td id=\"T_637af_row12_col92\" class=\"data row12 col92\" >-0.058176</td>\n",
+ " <td id=\"T_637af_row12_col93\" class=\"data row12 col93\" >0.120990</td>\n",
+ " <td id=\"T_637af_row12_col94\" class=\"data row12 col94\" >0.222522</td>\n",
+ " <td id=\"T_637af_row12_col95\" class=\"data row12 col95\" >-0.179507</td>\n",
+ " <td id=\"T_637af_row12_col96\" class=\"data row12 col96\" >0.421655</td>\n",
+ " <td id=\"T_637af_row12_col97\" class=\"data row12 col97\" >-0.914341</td>\n",
+ " <td id=\"T_637af_row12_col98\" class=\"data row12 col98\" >-0.234178</td>\n",
+ " <td id=\"T_637af_row12_col99\" class=\"data row12 col99\" >0.741524</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row13\" class=\"row_heading level0 row13\" >13</th>\n",
+ " <td id=\"T_637af_row13_col0\" class=\"data row13 col0\" >0.932714</td>\n",
+ " <td id=\"T_637af_row13_col1\" class=\"data row13 col1\" >1.423761</td>\n",
+ " <td id=\"T_637af_row13_col2\" class=\"data row13 col2\" >-1.280835</td>\n",
+ " <td id=\"T_637af_row13_col3\" class=\"data row13 col3\" >0.347882</td>\n",
+ " <td id=\"T_637af_row13_col4\" class=\"data row13 col4\" >-0.863171</td>\n",
+ " <td id=\"T_637af_row13_col5\" class=\"data row13 col5\" >-0.852580</td>\n",
+ " <td id=\"T_637af_row13_col6\" class=\"data row13 col6\" >1.044933</td>\n",
+ " <td id=\"T_637af_row13_col7\" class=\"data row13 col7\" >2.094536</td>\n",
+ " <td id=\"T_637af_row13_col8\" class=\"data row13 col8\" >0.806206</td>\n",
+ " <td id=\"T_637af_row13_col9\" class=\"data row13 col9\" >0.416201</td>\n",
+ " <td id=\"T_637af_row13_col10\" class=\"data row13 col10\" >-1.109503</td>\n",
+ " <td id=\"T_637af_row13_col11\" class=\"data row13 col11\" >0.145302</td>\n",
+ " <td id=\"T_637af_row13_col12\" class=\"data row13 col12\" >-0.996871</td>\n",
+ " <td id=\"T_637af_row13_col13\" class=\"data row13 col13\" >0.325456</td>\n",
+ " <td id=\"T_637af_row13_col14\" class=\"data row13 col14\" >-0.605081</td>\n",
+ " <td id=\"T_637af_row13_col15\" class=\"data row13 col15\" >1.175326</td>\n",
+ " <td id=\"T_637af_row13_col16\" class=\"data row13 col16\" >1.645054</td>\n",
+ " <td id=\"T_637af_row13_col17\" class=\"data row13 col17\" >0.293432</td>\n",
+ " <td id=\"T_637af_row13_col18\" class=\"data row13 col18\" >-2.766822</td>\n",
+ " <td id=\"T_637af_row13_col19\" class=\"data row13 col19\" >1.032849</td>\n",
+ " <td id=\"T_637af_row13_col20\" class=\"data row13 col20\" >0.079115</td>\n",
+ " <td id=\"T_637af_row13_col21\" class=\"data row13 col21\" >-1.414132</td>\n",
+ " <td id=\"T_637af_row13_col22\" class=\"data row13 col22\" >1.463376</td>\n",
+ " <td id=\"T_637af_row13_col23\" class=\"data row13 col23\" >2.335486</td>\n",
+ " <td id=\"T_637af_row13_col24\" class=\"data row13 col24\" >0.411951</td>\n",
+ " <td id=\"T_637af_row13_col25\" class=\"data row13 col25\" >-0.048543</td>\n",
+ " <td id=\"T_637af_row13_col26\" class=\"data row13 col26\" >0.159284</td>\n",
+ " <td id=\"T_637af_row13_col27\" class=\"data row13 col27\" >-0.651554</td>\n",
+ " <td id=\"T_637af_row13_col28\" class=\"data row13 col28\" >-1.093128</td>\n",
+ " <td id=\"T_637af_row13_col29\" class=\"data row13 col29\" >1.568390</td>\n",
+ " <td id=\"T_637af_row13_col30\" class=\"data row13 col30\" >-0.077807</td>\n",
+ " <td id=\"T_637af_row13_col31\" class=\"data row13 col31\" >-2.390779</td>\n",
+ " <td id=\"T_637af_row13_col32\" class=\"data row13 col32\" >-0.842346</td>\n",
+ " <td id=\"T_637af_row13_col33\" class=\"data row13 col33\" >-0.229675</td>\n",
+ " <td id=\"T_637af_row13_col34\" class=\"data row13 col34\" >-0.999072</td>\n",
+ " <td id=\"T_637af_row13_col35\" class=\"data row13 col35\" >-1.367219</td>\n",
+ " <td id=\"T_637af_row13_col36\" class=\"data row13 col36\" >-0.792042</td>\n",
+ " <td id=\"T_637af_row13_col37\" class=\"data row13 col37\" >-1.878575</td>\n",
+ " <td id=\"T_637af_row13_col38\" class=\"data row13 col38\" >1.451452</td>\n",
+ " <td id=\"T_637af_row13_col39\" class=\"data row13 col39\" >1.266250</td>\n",
+ " <td id=\"T_637af_row13_col40\" class=\"data row13 col40\" >-0.734315</td>\n",
+ " <td id=\"T_637af_row13_col41\" class=\"data row13 col41\" >0.266152</td>\n",
+ " <td id=\"T_637af_row13_col42\" class=\"data row13 col42\" >0.735523</td>\n",
+ " <td id=\"T_637af_row13_col43\" class=\"data row13 col43\" >-0.430860</td>\n",
+ " <td id=\"T_637af_row13_col44\" class=\"data row13 col44\" >0.229864</td>\n",
+ " <td id=\"T_637af_row13_col45\" class=\"data row13 col45\" >0.850083</td>\n",
+ " <td id=\"T_637af_row13_col46\" class=\"data row13 col46\" >-2.241241</td>\n",
+ " <td id=\"T_637af_row13_col47\" class=\"data row13 col47\" >1.063850</td>\n",
+ " <td id=\"T_637af_row13_col48\" class=\"data row13 col48\" >0.289409</td>\n",
+ " <td id=\"T_637af_row13_col49\" class=\"data row13 col49\" >-0.354360</td>\n",
+ " <td id=\"T_637af_row13_col50\" class=\"data row13 col50\" >0.113063</td>\n",
+ " <td id=\"T_637af_row13_col51\" class=\"data row13 col51\" >-0.173006</td>\n",
+ " <td id=\"T_637af_row13_col52\" class=\"data row13 col52\" >1.386998</td>\n",
+ " <td id=\"T_637af_row13_col53\" class=\"data row13 col53\" >1.886236</td>\n",
+ " <td id=\"T_637af_row13_col54\" class=\"data row13 col54\" >0.587119</td>\n",
+ " <td id=\"T_637af_row13_col55\" class=\"data row13 col55\" >-0.961133</td>\n",
+ " <td id=\"T_637af_row13_col56\" class=\"data row13 col56\" >0.399295</td>\n",
+ " <td id=\"T_637af_row13_col57\" class=\"data row13 col57\" >1.461560</td>\n",
+ " <td id=\"T_637af_row13_col58\" class=\"data row13 col58\" >0.310823</td>\n",
+ " <td id=\"T_637af_row13_col59\" class=\"data row13 col59\" >0.280220</td>\n",
+ " <td id=\"T_637af_row13_col60\" class=\"data row13 col60\" >-0.879103</td>\n",
+ " <td id=\"T_637af_row13_col61\" class=\"data row13 col61\" >-1.326348</td>\n",
+ " <td id=\"T_637af_row13_col62\" class=\"data row13 col62\" >0.003337</td>\n",
+ " <td id=\"T_637af_row13_col63\" class=\"data row13 col63\" >-1.085908</td>\n",
+ " <td id=\"T_637af_row13_col64\" class=\"data row13 col64\" >-0.436723</td>\n",
+ " <td id=\"T_637af_row13_col65\" class=\"data row13 col65\" >2.111926</td>\n",
+ " <td id=\"T_637af_row13_col66\" class=\"data row13 col66\" >0.106068</td>\n",
+ " <td id=\"T_637af_row13_col67\" class=\"data row13 col67\" >0.615597</td>\n",
+ " <td id=\"T_637af_row13_col68\" class=\"data row13 col68\" >2.152996</td>\n",
+ " <td id=\"T_637af_row13_col69\" class=\"data row13 col69\" >-0.196155</td>\n",
+ " <td id=\"T_637af_row13_col70\" class=\"data row13 col70\" >0.025747</td>\n",
+ " <td id=\"T_637af_row13_col71\" class=\"data row13 col71\" >-0.039061</td>\n",
+ " <td id=\"T_637af_row13_col72\" class=\"data row13 col72\" >0.656823</td>\n",
+ " <td id=\"T_637af_row13_col73\" class=\"data row13 col73\" >-0.347105</td>\n",
+ " <td id=\"T_637af_row13_col74\" class=\"data row13 col74\" >2.513979</td>\n",
+ " <td id=\"T_637af_row13_col75\" class=\"data row13 col75\" >1.758070</td>\n",
+ " <td id=\"T_637af_row13_col76\" class=\"data row13 col76\" >1.288473</td>\n",
+ " <td id=\"T_637af_row13_col77\" class=\"data row13 col77\" >-0.739185</td>\n",
+ " <td id=\"T_637af_row13_col78\" class=\"data row13 col78\" >-0.691592</td>\n",
+ " <td id=\"T_637af_row13_col79\" class=\"data row13 col79\" >-0.098728</td>\n",
+ " <td id=\"T_637af_row13_col80\" class=\"data row13 col80\" >-0.276386</td>\n",
+ " <td id=\"T_637af_row13_col81\" class=\"data row13 col81\" >0.489981</td>\n",
+ " <td id=\"T_637af_row13_col82\" class=\"data row13 col82\" >0.516278</td>\n",
+ " <td id=\"T_637af_row13_col83\" class=\"data row13 col83\" >-0.838258</td>\n",
+ " <td id=\"T_637af_row13_col84\" class=\"data row13 col84\" >0.596673</td>\n",
+ " <td id=\"T_637af_row13_col85\" class=\"data row13 col85\" >-0.331053</td>\n",
+ " <td id=\"T_637af_row13_col86\" class=\"data row13 col86\" >0.521174</td>\n",
+ " <td id=\"T_637af_row13_col87\" class=\"data row13 col87\" >-0.145023</td>\n",
+ " <td id=\"T_637af_row13_col88\" class=\"data row13 col88\" >0.836693</td>\n",
+ " <td id=\"T_637af_row13_col89\" class=\"data row13 col89\" >-1.092166</td>\n",
+ " <td id=\"T_637af_row13_col90\" class=\"data row13 col90\" >0.361733</td>\n",
+ " <td id=\"T_637af_row13_col91\" class=\"data row13 col91\" >-1.169981</td>\n",
+ " <td id=\"T_637af_row13_col92\" class=\"data row13 col92\" >0.046731</td>\n",
+ " <td id=\"T_637af_row13_col93\" class=\"data row13 col93\" >0.655377</td>\n",
+ " <td id=\"T_637af_row13_col94\" class=\"data row13 col94\" >-0.756852</td>\n",
+ " <td id=\"T_637af_row13_col95\" class=\"data row13 col95\" >1.285805</td>\n",
+ " <td id=\"T_637af_row13_col96\" class=\"data row13 col96\" >-0.095019</td>\n",
+ " <td id=\"T_637af_row13_col97\" class=\"data row13 col97\" >0.360253</td>\n",
+ " <td id=\"T_637af_row13_col98\" class=\"data row13 col98\" >1.370621</td>\n",
+ " <td id=\"T_637af_row13_col99\" class=\"data row13 col99\" >0.083010</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row14\" class=\"row_heading level0 row14\" >14</th>\n",
+ " <td id=\"T_637af_row14_col0\" class=\"data row14 col0\" >0.888893</td>\n",
+ " <td id=\"T_637af_row14_col1\" class=\"data row14 col1\" >2.288725</td>\n",
+ " <td id=\"T_637af_row14_col2\" class=\"data row14 col2\" >-1.032332</td>\n",
+ " <td id=\"T_637af_row14_col3\" class=\"data row14 col3\" >0.212273</td>\n",
+ " <td id=\"T_637af_row14_col4\" class=\"data row14 col4\" >-1.091826</td>\n",
+ " <td id=\"T_637af_row14_col5\" class=\"data row14 col5\" >1.692498</td>\n",
+ " <td id=\"T_637af_row14_col6\" class=\"data row14 col6\" >1.025367</td>\n",
+ " <td id=\"T_637af_row14_col7\" class=\"data row14 col7\" >0.550854</td>\n",
+ " <td id=\"T_637af_row14_col8\" class=\"data row14 col8\" >0.679430</td>\n",
+ " <td id=\"T_637af_row14_col9\" class=\"data row14 col9\" >-1.335712</td>\n",
+ " <td id=\"T_637af_row14_col10\" class=\"data row14 col10\" >-0.798341</td>\n",
+ " <td id=\"T_637af_row14_col11\" class=\"data row14 col11\" >2.265351</td>\n",
+ " <td id=\"T_637af_row14_col12\" class=\"data row14 col12\" >-1.006938</td>\n",
+ " <td id=\"T_637af_row14_col13\" class=\"data row14 col13\" >2.059761</td>\n",
+ " <td id=\"T_637af_row14_col14\" class=\"data row14 col14\" >0.420266</td>\n",
+ " <td id=\"T_637af_row14_col15\" class=\"data row14 col15\" >-1.189657</td>\n",
+ " <td id=\"T_637af_row14_col16\" class=\"data row14 col16\" >0.506674</td>\n",
+ " <td id=\"T_637af_row14_col17\" class=\"data row14 col17\" >0.260847</td>\n",
+ " <td id=\"T_637af_row14_col18\" class=\"data row14 col18\" >-0.533145</td>\n",
+ " <td id=\"T_637af_row14_col19\" class=\"data row14 col19\" >0.727267</td>\n",
+ " <td id=\"T_637af_row14_col20\" class=\"data row14 col20\" >1.412276</td>\n",
+ " <td id=\"T_637af_row14_col21\" class=\"data row14 col21\" >1.482106</td>\n",
+ " <td id=\"T_637af_row14_col22\" class=\"data row14 col22\" >-0.996258</td>\n",
+ " <td id=\"T_637af_row14_col23\" class=\"data row14 col23\" >0.588641</td>\n",
+ " <td id=\"T_637af_row14_col24\" class=\"data row14 col24\" >-0.412642</td>\n",
+ " <td id=\"T_637af_row14_col25\" class=\"data row14 col25\" >-0.920733</td>\n",
+ " <td id=\"T_637af_row14_col26\" class=\"data row14 col26\" >-0.874691</td>\n",
+ " <td id=\"T_637af_row14_col27\" class=\"data row14 col27\" >0.839002</td>\n",
+ " <td id=\"T_637af_row14_col28\" class=\"data row14 col28\" >0.501668</td>\n",
+ " <td id=\"T_637af_row14_col29\" class=\"data row14 col29\" >-0.342493</td>\n",
+ " <td id=\"T_637af_row14_col30\" class=\"data row14 col30\" >-0.533806</td>\n",
+ " <td id=\"T_637af_row14_col31\" class=\"data row14 col31\" >-2.146352</td>\n",
+ " <td id=\"T_637af_row14_col32\" class=\"data row14 col32\" >-0.597339</td>\n",
+ " <td id=\"T_637af_row14_col33\" class=\"data row14 col33\" >0.115726</td>\n",
+ " <td id=\"T_637af_row14_col34\" class=\"data row14 col34\" >0.850683</td>\n",
+ " <td id=\"T_637af_row14_col35\" class=\"data row14 col35\" >-0.752239</td>\n",
+ " <td id=\"T_637af_row14_col36\" class=\"data row14 col36\" >0.377263</td>\n",
+ " <td id=\"T_637af_row14_col37\" class=\"data row14 col37\" >-0.561982</td>\n",
+ " <td id=\"T_637af_row14_col38\" class=\"data row14 col38\" >0.262783</td>\n",
+ " <td id=\"T_637af_row14_col39\" class=\"data row14 col39\" >-0.356676</td>\n",
+ " <td id=\"T_637af_row14_col40\" class=\"data row14 col40\" >-0.367462</td>\n",
+ " <td id=\"T_637af_row14_col41\" class=\"data row14 col41\" >0.753611</td>\n",
+ " <td id=\"T_637af_row14_col42\" class=\"data row14 col42\" >-1.267414</td>\n",
+ " <td id=\"T_637af_row14_col43\" class=\"data row14 col43\" >-1.330698</td>\n",
+ " <td id=\"T_637af_row14_col44\" class=\"data row14 col44\" >-0.536453</td>\n",
+ " <td id=\"T_637af_row14_col45\" class=\"data row14 col45\" >0.840938</td>\n",
+ " <td id=\"T_637af_row14_col46\" class=\"data row14 col46\" >-0.763108</td>\n",
+ " <td id=\"T_637af_row14_col47\" class=\"data row14 col47\" >-0.268100</td>\n",
+ " <td id=\"T_637af_row14_col48\" class=\"data row14 col48\" >-0.677424</td>\n",
+ " <td id=\"T_637af_row14_col49\" class=\"data row14 col49\" >1.606831</td>\n",
+ " <td id=\"T_637af_row14_col50\" class=\"data row14 col50\" >0.151732</td>\n",
+ " <td id=\"T_637af_row14_col51\" class=\"data row14 col51\" >-2.085701</td>\n",
+ " <td id=\"T_637af_row14_col52\" class=\"data row14 col52\" >1.219296</td>\n",
+ " <td id=\"T_637af_row14_col53\" class=\"data row14 col53\" >0.400863</td>\n",
+ " <td id=\"T_637af_row14_col54\" class=\"data row14 col54\" >0.591165</td>\n",
+ " <td id=\"T_637af_row14_col55\" class=\"data row14 col55\" >-1.485213</td>\n",
+ " <td id=\"T_637af_row14_col56\" class=\"data row14 col56\" >1.501979</td>\n",
+ " <td id=\"T_637af_row14_col57\" class=\"data row14 col57\" >1.196569</td>\n",
+ " <td id=\"T_637af_row14_col58\" class=\"data row14 col58\" >-0.214154</td>\n",
+ " <td id=\"T_637af_row14_col59\" class=\"data row14 col59\" >0.339554</td>\n",
+ " <td id=\"T_637af_row14_col60\" class=\"data row14 col60\" >-0.034446</td>\n",
+ " <td id=\"T_637af_row14_col61\" class=\"data row14 col61\" >1.176452</td>\n",
+ " <td id=\"T_637af_row14_col62\" class=\"data row14 col62\" >0.546340</td>\n",
+ " <td id=\"T_637af_row14_col63\" class=\"data row14 col63\" >-1.255630</td>\n",
+ " <td id=\"T_637af_row14_col64\" class=\"data row14 col64\" >-1.309210</td>\n",
+ " <td id=\"T_637af_row14_col65\" class=\"data row14 col65\" >-0.445437</td>\n",
+ " <td id=\"T_637af_row14_col66\" class=\"data row14 col66\" >0.189437</td>\n",
+ " <td id=\"T_637af_row14_col67\" class=\"data row14 col67\" >-0.737463</td>\n",
+ " <td id=\"T_637af_row14_col68\" class=\"data row14 col68\" >0.843767</td>\n",
+ " <td id=\"T_637af_row14_col69\" class=\"data row14 col69\" >-0.605632</td>\n",
+ " <td id=\"T_637af_row14_col70\" class=\"data row14 col70\" >-0.060777</td>\n",
+ " <td id=\"T_637af_row14_col71\" class=\"data row14 col71\" >0.409310</td>\n",
+ " <td id=\"T_637af_row14_col72\" class=\"data row14 col72\" >1.285569</td>\n",
+ " <td id=\"T_637af_row14_col73\" class=\"data row14 col73\" >-0.622638</td>\n",
+ " <td id=\"T_637af_row14_col74\" class=\"data row14 col74\" >1.018193</td>\n",
+ " <td id=\"T_637af_row14_col75\" class=\"data row14 col75\" >0.880680</td>\n",
+ " <td id=\"T_637af_row14_col76\" class=\"data row14 col76\" >0.046805</td>\n",
+ " <td id=\"T_637af_row14_col77\" class=\"data row14 col77\" >-1.818058</td>\n",
+ " <td id=\"T_637af_row14_col78\" class=\"data row14 col78\" >-0.809829</td>\n",
+ " <td id=\"T_637af_row14_col79\" class=\"data row14 col79\" >0.875224</td>\n",
+ " <td id=\"T_637af_row14_col80\" class=\"data row14 col80\" >0.409569</td>\n",
+ " <td id=\"T_637af_row14_col81\" class=\"data row14 col81\" >-0.116621</td>\n",
+ " <td id=\"T_637af_row14_col82\" class=\"data row14 col82\" >-1.238919</td>\n",
+ " <td id=\"T_637af_row14_col83\" class=\"data row14 col83\" >3.305724</td>\n",
+ " <td id=\"T_637af_row14_col84\" class=\"data row14 col84\" >-0.024121</td>\n",
+ " <td id=\"T_637af_row14_col85\" class=\"data row14 col85\" >-1.756500</td>\n",
+ " <td id=\"T_637af_row14_col86\" class=\"data row14 col86\" >1.328958</td>\n",
+ " <td id=\"T_637af_row14_col87\" class=\"data row14 col87\" >0.507593</td>\n",
+ " <td id=\"T_637af_row14_col88\" class=\"data row14 col88\" >-0.866554</td>\n",
+ " <td id=\"T_637af_row14_col89\" class=\"data row14 col89\" >-2.240848</td>\n",
+ " <td id=\"T_637af_row14_col90\" class=\"data row14 col90\" >-0.661376</td>\n",
+ " <td id=\"T_637af_row14_col91\" class=\"data row14 col91\" >-0.671824</td>\n",
+ " <td id=\"T_637af_row14_col92\" class=\"data row14 col92\" >0.215720</td>\n",
+ " <td id=\"T_637af_row14_col93\" class=\"data row14 col93\" >-0.296326</td>\n",
+ " <td id=\"T_637af_row14_col94\" class=\"data row14 col94\" >0.481402</td>\n",
+ " <td id=\"T_637af_row14_col95\" class=\"data row14 col95\" >0.829645</td>\n",
+ " <td id=\"T_637af_row14_col96\" class=\"data row14 col96\" >-0.721025</td>\n",
+ " <td id=\"T_637af_row14_col97\" class=\"data row14 col97\" >1.263914</td>\n",
+ " <td id=\"T_637af_row14_col98\" class=\"data row14 col98\" >0.549047</td>\n",
+ " <td id=\"T_637af_row14_col99\" class=\"data row14 col99\" >-1.234945</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_637af_level0_row15\" class=\"row_heading level0 row15\" >15</th>\n",
+ " <td id=\"T_637af_row15_col0\" class=\"data row15 col0\" >-1.978838</td>\n",
+ " <td id=\"T_637af_row15_col1\" class=\"data row15 col1\" >0.721823</td>\n",
+ " <td id=\"T_637af_row15_col2\" class=\"data row15 col2\" >-0.559067</td>\n",
+ " <td id=\"T_637af_row15_col3\" class=\"data row15 col3\" >-1.235243</td>\n",
+ " <td id=\"T_637af_row15_col4\" class=\"data row15 col4\" >0.420716</td>\n",
+ " <td id=\"T_637af_row15_col5\" class=\"data row15 col5\" >-0.598845</td>\n",
+ " <td id=\"T_637af_row15_col6\" class=\"data row15 col6\" >0.359576</td>\n",
+ " <td id=\"T_637af_row15_col7\" class=\"data row15 col7\" >-0.619366</td>\n",
+ " <td id=\"T_637af_row15_col8\" class=\"data row15 col8\" >-1.757772</td>\n",
+ " <td id=\"T_637af_row15_col9\" class=\"data row15 col9\" >-1.156251</td>\n",
+ " <td id=\"T_637af_row15_col10\" class=\"data row15 col10\" >0.705212</td>\n",
+ " <td id=\"T_637af_row15_col11\" class=\"data row15 col11\" >0.875071</td>\n",
+ " <td id=\"T_637af_row15_col12\" class=\"data row15 col12\" >-1.020376</td>\n",
+ " <td id=\"T_637af_row15_col13\" class=\"data row15 col13\" >0.394760</td>\n",
+ " <td id=\"T_637af_row15_col14\" class=\"data row15 col14\" >-0.147970</td>\n",
+ " <td id=\"T_637af_row15_col15\" class=\"data row15 col15\" >0.230249</td>\n",
+ " <td id=\"T_637af_row15_col16\" class=\"data row15 col16\" >1.355203</td>\n",
+ " <td id=\"T_637af_row15_col17\" class=\"data row15 col17\" >1.794488</td>\n",
+ " <td id=\"T_637af_row15_col18\" class=\"data row15 col18\" >2.678058</td>\n",
+ " <td id=\"T_637af_row15_col19\" class=\"data row15 col19\" >-0.153565</td>\n",
+ " <td id=\"T_637af_row15_col20\" class=\"data row15 col20\" >-0.460959</td>\n",
+ " <td id=\"T_637af_row15_col21\" class=\"data row15 col21\" >-0.098108</td>\n",
+ " <td id=\"T_637af_row15_col22\" class=\"data row15 col22\" >-1.407930</td>\n",
+ " <td id=\"T_637af_row15_col23\" class=\"data row15 col23\" >-2.487702</td>\n",
+ " <td id=\"T_637af_row15_col24\" class=\"data row15 col24\" >1.823014</td>\n",
+ " <td id=\"T_637af_row15_col25\" class=\"data row15 col25\" >0.099873</td>\n",
+ " <td id=\"T_637af_row15_col26\" class=\"data row15 col26\" >-0.517603</td>\n",
+ " <td id=\"T_637af_row15_col27\" class=\"data row15 col27\" >-0.509311</td>\n",
+ " <td id=\"T_637af_row15_col28\" class=\"data row15 col28\" >-1.833175</td>\n",
+ " <td id=\"T_637af_row15_col29\" class=\"data row15 col29\" >-0.900906</td>\n",
+ " <td id=\"T_637af_row15_col30\" class=\"data row15 col30\" >0.459493</td>\n",
+ " <td id=\"T_637af_row15_col31\" class=\"data row15 col31\" >-0.655440</td>\n",
+ " <td id=\"T_637af_row15_col32\" class=\"data row15 col32\" >1.466122</td>\n",
+ " <td id=\"T_637af_row15_col33\" class=\"data row15 col33\" >-1.531389</td>\n",
+ " <td id=\"T_637af_row15_col34\" class=\"data row15 col34\" >-0.422106</td>\n",
+ " <td id=\"T_637af_row15_col35\" class=\"data row15 col35\" >0.421422</td>\n",
+ " <td id=\"T_637af_row15_col36\" class=\"data row15 col36\" >0.578615</td>\n",
+ " <td id=\"T_637af_row15_col37\" class=\"data row15 col37\" >0.259795</td>\n",
+ " <td id=\"T_637af_row15_col38\" class=\"data row15 col38\" >0.018941</td>\n",
+ " <td id=\"T_637af_row15_col39\" class=\"data row15 col39\" >-0.168726</td>\n",
+ " <td id=\"T_637af_row15_col40\" class=\"data row15 col40\" >1.611107</td>\n",
+ " <td id=\"T_637af_row15_col41\" class=\"data row15 col41\" >-1.586550</td>\n",
+ " <td id=\"T_637af_row15_col42\" class=\"data row15 col42\" >-1.384941</td>\n",
+ " <td id=\"T_637af_row15_col43\" class=\"data row15 col43\" >0.858377</td>\n",
+ " <td id=\"T_637af_row15_col44\" class=\"data row15 col44\" >1.033242</td>\n",
+ " <td id=\"T_637af_row15_col45\" class=\"data row15 col45\" >1.701343</td>\n",
+ " <td id=\"T_637af_row15_col46\" class=\"data row15 col46\" >1.748344</td>\n",
+ " <td id=\"T_637af_row15_col47\" class=\"data row15 col47\" >-0.371182</td>\n",
+ " <td id=\"T_637af_row15_col48\" class=\"data row15 col48\" >-0.843575</td>\n",
+ " <td id=\"T_637af_row15_col49\" class=\"data row15 col49\" >2.089641</td>\n",
+ " <td id=\"T_637af_row15_col50\" class=\"data row15 col50\" >-0.345430</td>\n",
+ " <td id=\"T_637af_row15_col51\" class=\"data row15 col51\" >-1.740556</td>\n",
+ " <td id=\"T_637af_row15_col52\" class=\"data row15 col52\" >0.141915</td>\n",
+ " <td id=\"T_637af_row15_col53\" class=\"data row15 col53\" >-2.197138</td>\n",
+ " <td id=\"T_637af_row15_col54\" class=\"data row15 col54\" >0.689569</td>\n",
+ " <td id=\"T_637af_row15_col55\" class=\"data row15 col55\" >-0.150025</td>\n",
+ " <td id=\"T_637af_row15_col56\" class=\"data row15 col56\" >0.287456</td>\n",
+ " <td id=\"T_637af_row15_col57\" class=\"data row15 col57\" >0.654016</td>\n",
+ " <td id=\"T_637af_row15_col58\" class=\"data row15 col58\" >-1.521919</td>\n",
+ " <td id=\"T_637af_row15_col59\" class=\"data row15 col59\" >-0.918008</td>\n",
+ " <td id=\"T_637af_row15_col60\" class=\"data row15 col60\" >-0.587528</td>\n",
+ " <td id=\"T_637af_row15_col61\" class=\"data row15 col61\" >0.230636</td>\n",
+ " <td id=\"T_637af_row15_col62\" class=\"data row15 col62\" >0.262637</td>\n",
+ " <td id=\"T_637af_row15_col63\" class=\"data row15 col63\" >0.615674</td>\n",
+ " <td id=\"T_637af_row15_col64\" class=\"data row15 col64\" >0.600044</td>\n",
+ " <td id=\"T_637af_row15_col65\" class=\"data row15 col65\" >-0.494699</td>\n",
+ " <td id=\"T_637af_row15_col66\" class=\"data row15 col66\" >-0.743089</td>\n",
+ " <td id=\"T_637af_row15_col67\" class=\"data row15 col67\" >0.220026</td>\n",
+ " <td id=\"T_637af_row15_col68\" class=\"data row15 col68\" >-0.242207</td>\n",
+ " <td id=\"T_637af_row15_col69\" class=\"data row15 col69\" >0.528216</td>\n",
+ " <td id=\"T_637af_row15_col70\" class=\"data row15 col70\" >-0.328174</td>\n",
+ " <td id=\"T_637af_row15_col71\" class=\"data row15 col71\" >-1.536517</td>\n",
+ " <td id=\"T_637af_row15_col72\" class=\"data row15 col72\" >-1.476640</td>\n",
+ " <td id=\"T_637af_row15_col73\" class=\"data row15 col73\" >-1.162114</td>\n",
+ " <td id=\"T_637af_row15_col74\" class=\"data row15 col74\" >-1.260222</td>\n",
+ " <td id=\"T_637af_row15_col75\" class=\"data row15 col75\" >1.106252</td>\n",
+ " <td id=\"T_637af_row15_col76\" class=\"data row15 col76\" >-1.467408</td>\n",
+ " <td id=\"T_637af_row15_col77\" class=\"data row15 col77\" >-0.349341</td>\n",
+ " <td id=\"T_637af_row15_col78\" class=\"data row15 col78\" >-1.841217</td>\n",
+ " <td id=\"T_637af_row15_col79\" class=\"data row15 col79\" >0.031296</td>\n",
+ " <td id=\"T_637af_row15_col80\" class=\"data row15 col80\" >-0.076475</td>\n",
+ " <td id=\"T_637af_row15_col81\" class=\"data row15 col81\" >-0.353383</td>\n",
+ " <td id=\"T_637af_row15_col82\" class=\"data row15 col82\" >0.807545</td>\n",
+ " <td id=\"T_637af_row15_col83\" class=\"data row15 col83\" >0.779064</td>\n",
+ " <td id=\"T_637af_row15_col84\" class=\"data row15 col84\" >-2.398417</td>\n",
+ " <td id=\"T_637af_row15_col85\" class=\"data row15 col85\" >-0.267828</td>\n",
+ " <td id=\"T_637af_row15_col86\" class=\"data row15 col86\" >1.549734</td>\n",
+ " <td id=\"T_637af_row15_col87\" class=\"data row15 col87\" >0.814397</td>\n",
+ " <td id=\"T_637af_row15_col88\" class=\"data row15 col88\" >0.284770</td>\n",
+ " <td id=\"T_637af_row15_col89\" class=\"data row15 col89\" >-0.659369</td>\n",
+ " <td id=\"T_637af_row15_col90\" class=\"data row15 col90\" >0.761040</td>\n",
+ " <td id=\"T_637af_row15_col91\" class=\"data row15 col91\" >-0.722067</td>\n",
+ " <td id=\"T_637af_row15_col92\" class=\"data row15 col92\" >0.810332</td>\n",
+ " <td id=\"T_637af_row15_col93\" class=\"data row15 col93\" >1.501295</td>\n",
+ " <td id=\"T_637af_row15_col94\" class=\"data row15 col94\" >1.440865</td>\n",
+ " <td id=\"T_637af_row15_col95\" class=\"data row15 col95\" >-1.367459</td>\n",
+ " <td id=\"T_637af_row15_col96\" class=\"data row15 col96\" >-0.700301</td>\n",
+ " <td id=\"T_637af_row15_col97\" class=\"data row15 col97\" >-1.540662</td>\n",
+ " <td id=\"T_637af_row15_col98\" class=\"data row15 col98\" >0.159837</td>\n",
+ " <td id=\"T_637af_row15_col99\" class=\"data row15 col99\" >-0.625415</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9c0adbf340>"
+ ]
+ },
+ "execution_count": 65,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
+ "bigdf.style.set_sticky(axis=\"index\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is also possible to stick MultiIndexes and even only specific levels."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.137619Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.137405Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.166122Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.165545Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_168ea thead tr th:nth-child(2) {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 0px;\n",
+ " min-width: 18px;\n",
+ " max-width: 18px;\n",
+ " z-index: 3 !important;\n",
+ "}\n",
+ "#T_168ea tbody tr th.level1 {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 0px;\n",
+ " min-width: 18px;\n",
+ " max-width: 18px;\n",
+ " z-index: 1;\n",
+ "}\n",
+ "#T_168ea thead tr th:nth-child(3) {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 18px;\n",
+ " min-width: 18px;\n",
+ " max-width: 18px;\n",
+ " z-index: 3 !important;\n",
+ "}\n",
+ "#T_168ea tbody tr th.level2 {\n",
+ " position: sticky;\n",
+ " background-color: inherit;\n",
+ " left: 18px;\n",
+ " min-width: 18px;\n",
+ " max-width: 18px;\n",
+ " z-index: 1;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_168ea\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank\" > </th>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_168ea_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_168ea_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_168ea_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " <th id=\"T_168ea_level0_col3\" class=\"col_heading level0 col3\" >3</th>\n",
+ " <th id=\"T_168ea_level0_col4\" class=\"col_heading level0 col4\" >4</th>\n",
+ " <th id=\"T_168ea_level0_col5\" class=\"col_heading level0 col5\" >5</th>\n",
+ " <th id=\"T_168ea_level0_col6\" class=\"col_heading level0 col6\" >6</th>\n",
+ " <th id=\"T_168ea_level0_col7\" class=\"col_heading level0 col7\" >7</th>\n",
+ " <th id=\"T_168ea_level0_col8\" class=\"col_heading level0 col8\" >8</th>\n",
+ " <th id=\"T_168ea_level0_col9\" class=\"col_heading level0 col9\" >9</th>\n",
+ " <th id=\"T_168ea_level0_col10\" class=\"col_heading level0 col10\" >10</th>\n",
+ " <th id=\"T_168ea_level0_col11\" class=\"col_heading level0 col11\" >11</th>\n",
+ " <th id=\"T_168ea_level0_col12\" class=\"col_heading level0 col12\" >12</th>\n",
+ " <th id=\"T_168ea_level0_col13\" class=\"col_heading level0 col13\" >13</th>\n",
+ " <th id=\"T_168ea_level0_col14\" class=\"col_heading level0 col14\" >14</th>\n",
+ " <th id=\"T_168ea_level0_col15\" class=\"col_heading level0 col15\" >15</th>\n",
+ " <th id=\"T_168ea_level0_col16\" class=\"col_heading level0 col16\" >16</th>\n",
+ " <th id=\"T_168ea_level0_col17\" class=\"col_heading level0 col17\" >17</th>\n",
+ " <th id=\"T_168ea_level0_col18\" class=\"col_heading level0 col18\" >18</th>\n",
+ " <th id=\"T_168ea_level0_col19\" class=\"col_heading level0 col19\" >19</th>\n",
+ " <th id=\"T_168ea_level0_col20\" class=\"col_heading level0 col20\" >20</th>\n",
+ " <th id=\"T_168ea_level0_col21\" class=\"col_heading level0 col21\" >21</th>\n",
+ " <th id=\"T_168ea_level0_col22\" class=\"col_heading level0 col22\" >22</th>\n",
+ " <th id=\"T_168ea_level0_col23\" class=\"col_heading level0 col23\" >23</th>\n",
+ " <th id=\"T_168ea_level0_col24\" class=\"col_heading level0 col24\" >24</th>\n",
+ " <th id=\"T_168ea_level0_col25\" class=\"col_heading level0 col25\" >25</th>\n",
+ " <th id=\"T_168ea_level0_col26\" class=\"col_heading level0 col26\" >26</th>\n",
+ " <th id=\"T_168ea_level0_col27\" class=\"col_heading level0 col27\" >27</th>\n",
+ " <th id=\"T_168ea_level0_col28\" class=\"col_heading level0 col28\" >28</th>\n",
+ " <th id=\"T_168ea_level0_col29\" class=\"col_heading level0 col29\" >29</th>\n",
+ " <th id=\"T_168ea_level0_col30\" class=\"col_heading level0 col30\" >30</th>\n",
+ " <th id=\"T_168ea_level0_col31\" class=\"col_heading level0 col31\" >31</th>\n",
+ " <th id=\"T_168ea_level0_col32\" class=\"col_heading level0 col32\" >32</th>\n",
+ " <th id=\"T_168ea_level0_col33\" class=\"col_heading level0 col33\" >33</th>\n",
+ " <th id=\"T_168ea_level0_col34\" class=\"col_heading level0 col34\" >34</th>\n",
+ " <th id=\"T_168ea_level0_col35\" class=\"col_heading level0 col35\" >35</th>\n",
+ " <th id=\"T_168ea_level0_col36\" class=\"col_heading level0 col36\" >36</th>\n",
+ " <th id=\"T_168ea_level0_col37\" class=\"col_heading level0 col37\" >37</th>\n",
+ " <th id=\"T_168ea_level0_col38\" class=\"col_heading level0 col38\" >38</th>\n",
+ " <th id=\"T_168ea_level0_col39\" class=\"col_heading level0 col39\" >39</th>\n",
+ " <th id=\"T_168ea_level0_col40\" class=\"col_heading level0 col40\" >40</th>\n",
+ " <th id=\"T_168ea_level0_col41\" class=\"col_heading level0 col41\" >41</th>\n",
+ " <th id=\"T_168ea_level0_col42\" class=\"col_heading level0 col42\" >42</th>\n",
+ " <th id=\"T_168ea_level0_col43\" class=\"col_heading level0 col43\" >43</th>\n",
+ " <th id=\"T_168ea_level0_col44\" class=\"col_heading level0 col44\" >44</th>\n",
+ " <th id=\"T_168ea_level0_col45\" class=\"col_heading level0 col45\" >45</th>\n",
+ " <th id=\"T_168ea_level0_col46\" class=\"col_heading level0 col46\" >46</th>\n",
+ " <th id=\"T_168ea_level0_col47\" class=\"col_heading level0 col47\" >47</th>\n",
+ " <th id=\"T_168ea_level0_col48\" class=\"col_heading level0 col48\" >48</th>\n",
+ " <th id=\"T_168ea_level0_col49\" class=\"col_heading level0 col49\" >49</th>\n",
+ " <th id=\"T_168ea_level0_col50\" class=\"col_heading level0 col50\" >50</th>\n",
+ " <th id=\"T_168ea_level0_col51\" class=\"col_heading level0 col51\" >51</th>\n",
+ " <th id=\"T_168ea_level0_col52\" class=\"col_heading level0 col52\" >52</th>\n",
+ " <th id=\"T_168ea_level0_col53\" class=\"col_heading level0 col53\" >53</th>\n",
+ " <th id=\"T_168ea_level0_col54\" class=\"col_heading level0 col54\" >54</th>\n",
+ " <th id=\"T_168ea_level0_col55\" class=\"col_heading level0 col55\" >55</th>\n",
+ " <th id=\"T_168ea_level0_col56\" class=\"col_heading level0 col56\" >56</th>\n",
+ " <th id=\"T_168ea_level0_col57\" class=\"col_heading level0 col57\" >57</th>\n",
+ " <th id=\"T_168ea_level0_col58\" class=\"col_heading level0 col58\" >58</th>\n",
+ " <th id=\"T_168ea_level0_col59\" class=\"col_heading level0 col59\" >59</th>\n",
+ " <th id=\"T_168ea_level0_col60\" class=\"col_heading level0 col60\" >60</th>\n",
+ " <th id=\"T_168ea_level0_col61\" class=\"col_heading level0 col61\" >61</th>\n",
+ " <th id=\"T_168ea_level0_col62\" class=\"col_heading level0 col62\" >62</th>\n",
+ " <th id=\"T_168ea_level0_col63\" class=\"col_heading level0 col63\" >63</th>\n",
+ " <th id=\"T_168ea_level0_col64\" class=\"col_heading level0 col64\" >64</th>\n",
+ " <th id=\"T_168ea_level0_col65\" class=\"col_heading level0 col65\" >65</th>\n",
+ " <th id=\"T_168ea_level0_col66\" class=\"col_heading level0 col66\" >66</th>\n",
+ " <th id=\"T_168ea_level0_col67\" class=\"col_heading level0 col67\" >67</th>\n",
+ " <th id=\"T_168ea_level0_col68\" class=\"col_heading level0 col68\" >68</th>\n",
+ " <th id=\"T_168ea_level0_col69\" class=\"col_heading level0 col69\" >69</th>\n",
+ " <th id=\"T_168ea_level0_col70\" class=\"col_heading level0 col70\" >70</th>\n",
+ " <th id=\"T_168ea_level0_col71\" class=\"col_heading level0 col71\" >71</th>\n",
+ " <th id=\"T_168ea_level0_col72\" class=\"col_heading level0 col72\" >72</th>\n",
+ " <th id=\"T_168ea_level0_col73\" class=\"col_heading level0 col73\" >73</th>\n",
+ " <th id=\"T_168ea_level0_col74\" class=\"col_heading level0 col74\" >74</th>\n",
+ " <th id=\"T_168ea_level0_col75\" class=\"col_heading level0 col75\" >75</th>\n",
+ " <th id=\"T_168ea_level0_col76\" class=\"col_heading level0 col76\" >76</th>\n",
+ " <th id=\"T_168ea_level0_col77\" class=\"col_heading level0 col77\" >77</th>\n",
+ " <th id=\"T_168ea_level0_col78\" class=\"col_heading level0 col78\" >78</th>\n",
+ " <th id=\"T_168ea_level0_col79\" class=\"col_heading level0 col79\" >79</th>\n",
+ " <th id=\"T_168ea_level0_col80\" class=\"col_heading level0 col80\" >80</th>\n",
+ " <th id=\"T_168ea_level0_col81\" class=\"col_heading level0 col81\" >81</th>\n",
+ " <th id=\"T_168ea_level0_col82\" class=\"col_heading level0 col82\" >82</th>\n",
+ " <th id=\"T_168ea_level0_col83\" class=\"col_heading level0 col83\" >83</th>\n",
+ " <th id=\"T_168ea_level0_col84\" class=\"col_heading level0 col84\" >84</th>\n",
+ " <th id=\"T_168ea_level0_col85\" class=\"col_heading level0 col85\" >85</th>\n",
+ " <th id=\"T_168ea_level0_col86\" class=\"col_heading level0 col86\" >86</th>\n",
+ " <th id=\"T_168ea_level0_col87\" class=\"col_heading level0 col87\" >87</th>\n",
+ " <th id=\"T_168ea_level0_col88\" class=\"col_heading level0 col88\" >88</th>\n",
+ " <th id=\"T_168ea_level0_col89\" class=\"col_heading level0 col89\" >89</th>\n",
+ " <th id=\"T_168ea_level0_col90\" class=\"col_heading level0 col90\" >90</th>\n",
+ " <th id=\"T_168ea_level0_col91\" class=\"col_heading level0 col91\" >91</th>\n",
+ " <th id=\"T_168ea_level0_col92\" class=\"col_heading level0 col92\" >92</th>\n",
+ " <th id=\"T_168ea_level0_col93\" class=\"col_heading level0 col93\" >93</th>\n",
+ " <th id=\"T_168ea_level0_col94\" class=\"col_heading level0 col94\" >94</th>\n",
+ " <th id=\"T_168ea_level0_col95\" class=\"col_heading level0 col95\" >95</th>\n",
+ " <th id=\"T_168ea_level0_col96\" class=\"col_heading level0 col96\" >96</th>\n",
+ " <th id=\"T_168ea_level0_col97\" class=\"col_heading level0 col97\" >97</th>\n",
+ " <th id=\"T_168ea_level0_col98\" class=\"col_heading level0 col98\" >98</th>\n",
+ " <th id=\"T_168ea_level0_col99\" class=\"col_heading level0 col99\" >99</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"8\">A</th>\n",
+ " <th id=\"T_168ea_level1_row0\" class=\"row_heading level1 row0\" rowspan=\"4\">0</th>\n",
+ " <th id=\"T_168ea_level2_row0\" class=\"row_heading level2 row0\" >0</th>\n",
+ " <td id=\"T_168ea_row0_col0\" class=\"data row0 col0\" >-0.773866</td>\n",
+ " <td id=\"T_168ea_row0_col1\" class=\"data row0 col1\" >-0.240521</td>\n",
+ " <td id=\"T_168ea_row0_col2\" class=\"data row0 col2\" >-0.217165</td>\n",
+ " <td id=\"T_168ea_row0_col3\" class=\"data row0 col3\" >1.173609</td>\n",
+ " <td id=\"T_168ea_row0_col4\" class=\"data row0 col4\" >0.686390</td>\n",
+ " <td id=\"T_168ea_row0_col5\" class=\"data row0 col5\" >0.008358</td>\n",
+ " <td id=\"T_168ea_row0_col6\" class=\"data row0 col6\" >0.696232</td>\n",
+ " <td id=\"T_168ea_row0_col7\" class=\"data row0 col7\" >0.173166</td>\n",
+ " <td id=\"T_168ea_row0_col8\" class=\"data row0 col8\" >0.620498</td>\n",
+ " <td id=\"T_168ea_row0_col9\" class=\"data row0 col9\" >0.504067</td>\n",
+ " <td id=\"T_168ea_row0_col10\" class=\"data row0 col10\" >0.428066</td>\n",
+ " <td id=\"T_168ea_row0_col11\" class=\"data row0 col11\" >-0.051824</td>\n",
+ " <td id=\"T_168ea_row0_col12\" class=\"data row0 col12\" >0.719915</td>\n",
+ " <td id=\"T_168ea_row0_col13\" class=\"data row0 col13\" >0.057165</td>\n",
+ " <td id=\"T_168ea_row0_col14\" class=\"data row0 col14\" >0.562808</td>\n",
+ " <td id=\"T_168ea_row0_col15\" class=\"data row0 col15\" >-0.369536</td>\n",
+ " <td id=\"T_168ea_row0_col16\" class=\"data row0 col16\" >0.483399</td>\n",
+ " <td id=\"T_168ea_row0_col17\" class=\"data row0 col17\" >0.620765</td>\n",
+ " <td id=\"T_168ea_row0_col18\" class=\"data row0 col18\" >-0.354342</td>\n",
+ " <td id=\"T_168ea_row0_col19\" class=\"data row0 col19\" >-1.469471</td>\n",
+ " <td id=\"T_168ea_row0_col20\" class=\"data row0 col20\" >-1.937266</td>\n",
+ " <td id=\"T_168ea_row0_col21\" class=\"data row0 col21\" >0.038031</td>\n",
+ " <td id=\"T_168ea_row0_col22\" class=\"data row0 col22\" >-1.518162</td>\n",
+ " <td id=\"T_168ea_row0_col23\" class=\"data row0 col23\" >-0.417599</td>\n",
+ " <td id=\"T_168ea_row0_col24\" class=\"data row0 col24\" >0.386717</td>\n",
+ " <td id=\"T_168ea_row0_col25\" class=\"data row0 col25\" >0.716193</td>\n",
+ " <td id=\"T_168ea_row0_col26\" class=\"data row0 col26\" >0.489961</td>\n",
+ " <td id=\"T_168ea_row0_col27\" class=\"data row0 col27\" >0.733957</td>\n",
+ " <td id=\"T_168ea_row0_col28\" class=\"data row0 col28\" >0.914415</td>\n",
+ " <td id=\"T_168ea_row0_col29\" class=\"data row0 col29\" >0.679894</td>\n",
+ " <td id=\"T_168ea_row0_col30\" class=\"data row0 col30\" >0.255448</td>\n",
+ " <td id=\"T_168ea_row0_col31\" class=\"data row0 col31\" >-0.508338</td>\n",
+ " <td id=\"T_168ea_row0_col32\" class=\"data row0 col32\" >0.332030</td>\n",
+ " <td id=\"T_168ea_row0_col33\" class=\"data row0 col33\" >-0.111107</td>\n",
+ " <td id=\"T_168ea_row0_col34\" class=\"data row0 col34\" >-0.251983</td>\n",
+ " <td id=\"T_168ea_row0_col35\" class=\"data row0 col35\" >-1.456620</td>\n",
+ " <td id=\"T_168ea_row0_col36\" class=\"data row0 col36\" >0.409630</td>\n",
+ " <td id=\"T_168ea_row0_col37\" class=\"data row0 col37\" >1.062320</td>\n",
+ " <td id=\"T_168ea_row0_col38\" class=\"data row0 col38\" >-0.577115</td>\n",
+ " <td id=\"T_168ea_row0_col39\" class=\"data row0 col39\" >0.718796</td>\n",
+ " <td id=\"T_168ea_row0_col40\" class=\"data row0 col40\" >-0.399260</td>\n",
+ " <td id=\"T_168ea_row0_col41\" class=\"data row0 col41\" >-1.311389</td>\n",
+ " <td id=\"T_168ea_row0_col42\" class=\"data row0 col42\" >0.649122</td>\n",
+ " <td id=\"T_168ea_row0_col43\" class=\"data row0 col43\" >0.091566</td>\n",
+ " <td id=\"T_168ea_row0_col44\" class=\"data row0 col44\" >0.628872</td>\n",
+ " <td id=\"T_168ea_row0_col45\" class=\"data row0 col45\" >0.297894</td>\n",
+ " <td id=\"T_168ea_row0_col46\" class=\"data row0 col46\" >-0.142290</td>\n",
+ " <td id=\"T_168ea_row0_col47\" class=\"data row0 col47\" >-0.542291</td>\n",
+ " <td id=\"T_168ea_row0_col48\" class=\"data row0 col48\" >-0.914290</td>\n",
+ " <td id=\"T_168ea_row0_col49\" class=\"data row0 col49\" >1.144514</td>\n",
+ " <td id=\"T_168ea_row0_col50\" class=\"data row0 col50\" >0.313584</td>\n",
+ " <td id=\"T_168ea_row0_col51\" class=\"data row0 col51\" >1.182635</td>\n",
+ " <td id=\"T_168ea_row0_col52\" class=\"data row0 col52\" >1.214235</td>\n",
+ " <td id=\"T_168ea_row0_col53\" class=\"data row0 col53\" >-0.416446</td>\n",
+ " <td id=\"T_168ea_row0_col54\" class=\"data row0 col54\" >-1.653940</td>\n",
+ " <td id=\"T_168ea_row0_col55\" class=\"data row0 col55\" >-2.550787</td>\n",
+ " <td id=\"T_168ea_row0_col56\" class=\"data row0 col56\" >0.442473</td>\n",
+ " <td id=\"T_168ea_row0_col57\" class=\"data row0 col57\" >0.052127</td>\n",
+ " <td id=\"T_168ea_row0_col58\" class=\"data row0 col58\" >-0.464469</td>\n",
+ " <td id=\"T_168ea_row0_col59\" class=\"data row0 col59\" >-0.523852</td>\n",
+ " <td id=\"T_168ea_row0_col60\" class=\"data row0 col60\" >0.989726</td>\n",
+ " <td id=\"T_168ea_row0_col61\" class=\"data row0 col61\" >-1.325539</td>\n",
+ " <td id=\"T_168ea_row0_col62\" class=\"data row0 col62\" >-0.199687</td>\n",
+ " <td id=\"T_168ea_row0_col63\" class=\"data row0 col63\" >-1.226727</td>\n",
+ " <td id=\"T_168ea_row0_col64\" class=\"data row0 col64\" >0.290018</td>\n",
+ " <td id=\"T_168ea_row0_col65\" class=\"data row0 col65\" >1.164574</td>\n",
+ " <td id=\"T_168ea_row0_col66\" class=\"data row0 col66\" >0.817841</td>\n",
+ " <td id=\"T_168ea_row0_col67\" class=\"data row0 col67\" >-0.309509</td>\n",
+ " <td id=\"T_168ea_row0_col68\" class=\"data row0 col68\" >0.496599</td>\n",
+ " <td id=\"T_168ea_row0_col69\" class=\"data row0 col69\" >0.943536</td>\n",
+ " <td id=\"T_168ea_row0_col70\" class=\"data row0 col70\" >-0.091850</td>\n",
+ " <td id=\"T_168ea_row0_col71\" class=\"data row0 col71\" >-2.802658</td>\n",
+ " <td id=\"T_168ea_row0_col72\" class=\"data row0 col72\" >2.126219</td>\n",
+ " <td id=\"T_168ea_row0_col73\" class=\"data row0 col73\" >-0.521161</td>\n",
+ " <td id=\"T_168ea_row0_col74\" class=\"data row0 col74\" >0.288098</td>\n",
+ " <td id=\"T_168ea_row0_col75\" class=\"data row0 col75\" >-0.454663</td>\n",
+ " <td id=\"T_168ea_row0_col76\" class=\"data row0 col76\" >-1.676143</td>\n",
+ " <td id=\"T_168ea_row0_col77\" class=\"data row0 col77\" >-0.357661</td>\n",
+ " <td id=\"T_168ea_row0_col78\" class=\"data row0 col78\" >-0.788960</td>\n",
+ " <td id=\"T_168ea_row0_col79\" class=\"data row0 col79\" >0.185911</td>\n",
+ " <td id=\"T_168ea_row0_col80\" class=\"data row0 col80\" >-0.017106</td>\n",
+ " <td id=\"T_168ea_row0_col81\" class=\"data row0 col81\" >2.454020</td>\n",
+ " <td id=\"T_168ea_row0_col82\" class=\"data row0 col82\" >1.832706</td>\n",
+ " <td id=\"T_168ea_row0_col83\" class=\"data row0 col83\" >-0.911743</td>\n",
+ " <td id=\"T_168ea_row0_col84\" class=\"data row0 col84\" >-0.655873</td>\n",
+ " <td id=\"T_168ea_row0_col85\" class=\"data row0 col85\" >-0.000514</td>\n",
+ " <td id=\"T_168ea_row0_col86\" class=\"data row0 col86\" >-2.226997</td>\n",
+ " <td id=\"T_168ea_row0_col87\" class=\"data row0 col87\" >0.677285</td>\n",
+ " <td id=\"T_168ea_row0_col88\" class=\"data row0 col88\" >-0.140249</td>\n",
+ " <td id=\"T_168ea_row0_col89\" class=\"data row0 col89\" >-0.408407</td>\n",
+ " <td id=\"T_168ea_row0_col90\" class=\"data row0 col90\" >-0.838665</td>\n",
+ " <td id=\"T_168ea_row0_col91\" class=\"data row0 col91\" >0.482228</td>\n",
+ " <td id=\"T_168ea_row0_col92\" class=\"data row0 col92\" >1.243458</td>\n",
+ " <td id=\"T_168ea_row0_col93\" class=\"data row0 col93\" >-0.477394</td>\n",
+ " <td id=\"T_168ea_row0_col94\" class=\"data row0 col94\" >-0.220343</td>\n",
+ " <td id=\"T_168ea_row0_col95\" class=\"data row0 col95\" >-2.463966</td>\n",
+ " <td id=\"T_168ea_row0_col96\" class=\"data row0 col96\" >0.237325</td>\n",
+ " <td id=\"T_168ea_row0_col97\" class=\"data row0 col97\" >-0.307380</td>\n",
+ " <td id=\"T_168ea_row0_col98\" class=\"data row0 col98\" >1.172478</td>\n",
+ " <td id=\"T_168ea_row0_col99\" class=\"data row0 col99\" >0.819492</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row1\" class=\"row_heading level2 row1\" >1</th>\n",
+ " <td id=\"T_168ea_row1_col0\" class=\"data row1 col0\" >0.405906</td>\n",
+ " <td id=\"T_168ea_row1_col1\" class=\"data row1 col1\" >-0.978919</td>\n",
+ " <td id=\"T_168ea_row1_col2\" class=\"data row1 col2\" >1.267526</td>\n",
+ " <td id=\"T_168ea_row1_col3\" class=\"data row1 col3\" >0.145250</td>\n",
+ " <td id=\"T_168ea_row1_col4\" class=\"data row1 col4\" >-1.066786</td>\n",
+ " <td id=\"T_168ea_row1_col5\" class=\"data row1 col5\" >-2.114192</td>\n",
+ " <td id=\"T_168ea_row1_col6\" class=\"data row1 col6\" >-1.128346</td>\n",
+ " <td id=\"T_168ea_row1_col7\" class=\"data row1 col7\" >-1.082523</td>\n",
+ " <td id=\"T_168ea_row1_col8\" class=\"data row1 col8\" >0.372216</td>\n",
+ " <td id=\"T_168ea_row1_col9\" class=\"data row1 col9\" >0.004127</td>\n",
+ " <td id=\"T_168ea_row1_col10\" class=\"data row1 col10\" >-0.211984</td>\n",
+ " <td id=\"T_168ea_row1_col11\" class=\"data row1 col11\" >0.937326</td>\n",
+ " <td id=\"T_168ea_row1_col12\" class=\"data row1 col12\" >-0.935890</td>\n",
+ " <td id=\"T_168ea_row1_col13\" class=\"data row1 col13\" >-1.704118</td>\n",
+ " <td id=\"T_168ea_row1_col14\" class=\"data row1 col14\" >0.611789</td>\n",
+ " <td id=\"T_168ea_row1_col15\" class=\"data row1 col15\" >-1.030015</td>\n",
+ " <td id=\"T_168ea_row1_col16\" class=\"data row1 col16\" >0.636123</td>\n",
+ " <td id=\"T_168ea_row1_col17\" class=\"data row1 col17\" >-1.506193</td>\n",
+ " <td id=\"T_168ea_row1_col18\" class=\"data row1 col18\" >1.736609</td>\n",
+ " <td id=\"T_168ea_row1_col19\" class=\"data row1 col19\" >1.392958</td>\n",
+ " <td id=\"T_168ea_row1_col20\" class=\"data row1 col20\" >1.009424</td>\n",
+ " <td id=\"T_168ea_row1_col21\" class=\"data row1 col21\" >0.353266</td>\n",
+ " <td id=\"T_168ea_row1_col22\" class=\"data row1 col22\" >0.697339</td>\n",
+ " <td id=\"T_168ea_row1_col23\" class=\"data row1 col23\" >-0.297424</td>\n",
+ " <td id=\"T_168ea_row1_col24\" class=\"data row1 col24\" >0.428702</td>\n",
+ " <td id=\"T_168ea_row1_col25\" class=\"data row1 col25\" >-0.145346</td>\n",
+ " <td id=\"T_168ea_row1_col26\" class=\"data row1 col26\" >-0.333553</td>\n",
+ " <td id=\"T_168ea_row1_col27\" class=\"data row1 col27\" >-0.974699</td>\n",
+ " <td id=\"T_168ea_row1_col28\" class=\"data row1 col28\" >0.665314</td>\n",
+ " <td id=\"T_168ea_row1_col29\" class=\"data row1 col29\" >0.971944</td>\n",
+ " <td id=\"T_168ea_row1_col30\" class=\"data row1 col30\" >0.121950</td>\n",
+ " <td id=\"T_168ea_row1_col31\" class=\"data row1 col31\" >-1.439668</td>\n",
+ " <td id=\"T_168ea_row1_col32\" class=\"data row1 col32\" >1.018808</td>\n",
+ " <td id=\"T_168ea_row1_col33\" class=\"data row1 col33\" >1.442399</td>\n",
+ " <td id=\"T_168ea_row1_col34\" class=\"data row1 col34\" >-0.199585</td>\n",
+ " <td id=\"T_168ea_row1_col35\" class=\"data row1 col35\" >-1.165916</td>\n",
+ " <td id=\"T_168ea_row1_col36\" class=\"data row1 col36\" >0.645656</td>\n",
+ " <td id=\"T_168ea_row1_col37\" class=\"data row1 col37\" >1.436466</td>\n",
+ " <td id=\"T_168ea_row1_col38\" class=\"data row1 col38\" >-0.921215</td>\n",
+ " <td id=\"T_168ea_row1_col39\" class=\"data row1 col39\" >1.293906</td>\n",
+ " <td id=\"T_168ea_row1_col40\" class=\"data row1 col40\" >-2.706443</td>\n",
+ " <td id=\"T_168ea_row1_col41\" class=\"data row1 col41\" >1.460928</td>\n",
+ " <td id=\"T_168ea_row1_col42\" class=\"data row1 col42\" >-0.823197</td>\n",
+ " <td id=\"T_168ea_row1_col43\" class=\"data row1 col43\" >0.292952</td>\n",
+ " <td id=\"T_168ea_row1_col44\" class=\"data row1 col44\" >-1.448992</td>\n",
+ " <td id=\"T_168ea_row1_col45\" class=\"data row1 col45\" >0.026692</td>\n",
+ " <td id=\"T_168ea_row1_col46\" class=\"data row1 col46\" >-0.975883</td>\n",
+ " <td id=\"T_168ea_row1_col47\" class=\"data row1 col47\" >0.392823</td>\n",
+ " <td id=\"T_168ea_row1_col48\" class=\"data row1 col48\" >0.442166</td>\n",
+ " <td id=\"T_168ea_row1_col49\" class=\"data row1 col49\" >0.745741</td>\n",
+ " <td id=\"T_168ea_row1_col50\" class=\"data row1 col50\" >1.187982</td>\n",
+ " <td id=\"T_168ea_row1_col51\" class=\"data row1 col51\" >-0.218570</td>\n",
+ " <td id=\"T_168ea_row1_col52\" class=\"data row1 col52\" >0.305288</td>\n",
+ " <td id=\"T_168ea_row1_col53\" class=\"data row1 col53\" >0.054932</td>\n",
+ " <td id=\"T_168ea_row1_col54\" class=\"data row1 col54\" >-1.476953</td>\n",
+ " <td id=\"T_168ea_row1_col55\" class=\"data row1 col55\" >-0.114434</td>\n",
+ " <td id=\"T_168ea_row1_col56\" class=\"data row1 col56\" >0.014103</td>\n",
+ " <td id=\"T_168ea_row1_col57\" class=\"data row1 col57\" >0.825394</td>\n",
+ " <td id=\"T_168ea_row1_col58\" class=\"data row1 col58\" >-0.060654</td>\n",
+ " <td id=\"T_168ea_row1_col59\" class=\"data row1 col59\" >-0.413688</td>\n",
+ " <td id=\"T_168ea_row1_col60\" class=\"data row1 col60\" >0.974836</td>\n",
+ " <td id=\"T_168ea_row1_col61\" class=\"data row1 col61\" >1.339210</td>\n",
+ " <td id=\"T_168ea_row1_col62\" class=\"data row1 col62\" >1.034838</td>\n",
+ " <td id=\"T_168ea_row1_col63\" class=\"data row1 col63\" >0.040775</td>\n",
+ " <td id=\"T_168ea_row1_col64\" class=\"data row1 col64\" >0.705001</td>\n",
+ " <td id=\"T_168ea_row1_col65\" class=\"data row1 col65\" >0.017796</td>\n",
+ " <td id=\"T_168ea_row1_col66\" class=\"data row1 col66\" >1.867681</td>\n",
+ " <td id=\"T_168ea_row1_col67\" class=\"data row1 col67\" >-0.390173</td>\n",
+ " <td id=\"T_168ea_row1_col68\" class=\"data row1 col68\" >2.285277</td>\n",
+ " <td id=\"T_168ea_row1_col69\" class=\"data row1 col69\" >2.311464</td>\n",
+ " <td id=\"T_168ea_row1_col70\" class=\"data row1 col70\" >-0.085070</td>\n",
+ " <td id=\"T_168ea_row1_col71\" class=\"data row1 col71\" >-0.648115</td>\n",
+ " <td id=\"T_168ea_row1_col72\" class=\"data row1 col72\" >0.576300</td>\n",
+ " <td id=\"T_168ea_row1_col73\" class=\"data row1 col73\" >-0.790087</td>\n",
+ " <td id=\"T_168ea_row1_col74\" class=\"data row1 col74\" >-1.183798</td>\n",
+ " <td id=\"T_168ea_row1_col75\" class=\"data row1 col75\" >-1.334558</td>\n",
+ " <td id=\"T_168ea_row1_col76\" class=\"data row1 col76\" >-0.454118</td>\n",
+ " <td id=\"T_168ea_row1_col77\" class=\"data row1 col77\" >0.319302</td>\n",
+ " <td id=\"T_168ea_row1_col78\" class=\"data row1 col78\" >1.706488</td>\n",
+ " <td id=\"T_168ea_row1_col79\" class=\"data row1 col79\" >0.830429</td>\n",
+ " <td id=\"T_168ea_row1_col80\" class=\"data row1 col80\" >0.502476</td>\n",
+ " <td id=\"T_168ea_row1_col81\" class=\"data row1 col81\" >-0.079631</td>\n",
+ " <td id=\"T_168ea_row1_col82\" class=\"data row1 col82\" >0.414635</td>\n",
+ " <td id=\"T_168ea_row1_col83\" class=\"data row1 col83\" >0.332511</td>\n",
+ " <td id=\"T_168ea_row1_col84\" class=\"data row1 col84\" >0.042935</td>\n",
+ " <td id=\"T_168ea_row1_col85\" class=\"data row1 col85\" >-0.160910</td>\n",
+ " <td id=\"T_168ea_row1_col86\" class=\"data row1 col86\" >0.918553</td>\n",
+ " <td id=\"T_168ea_row1_col87\" class=\"data row1 col87\" >-0.292697</td>\n",
+ " <td id=\"T_168ea_row1_col88\" class=\"data row1 col88\" >-1.303834</td>\n",
+ " <td id=\"T_168ea_row1_col89\" class=\"data row1 col89\" >-0.199604</td>\n",
+ " <td id=\"T_168ea_row1_col90\" class=\"data row1 col90\" >0.871023</td>\n",
+ " <td id=\"T_168ea_row1_col91\" class=\"data row1 col91\" >-1.370681</td>\n",
+ " <td id=\"T_168ea_row1_col92\" class=\"data row1 col92\" >-0.205701</td>\n",
+ " <td id=\"T_168ea_row1_col93\" class=\"data row1 col93\" >-0.492973</td>\n",
+ " <td id=\"T_168ea_row1_col94\" class=\"data row1 col94\" >1.123083</td>\n",
+ " <td id=\"T_168ea_row1_col95\" class=\"data row1 col95\" >-0.081842</td>\n",
+ " <td id=\"T_168ea_row1_col96\" class=\"data row1 col96\" >-0.118527</td>\n",
+ " <td id=\"T_168ea_row1_col97\" class=\"data row1 col97\" >0.245838</td>\n",
+ " <td id=\"T_168ea_row1_col98\" class=\"data row1 col98\" >-0.315742</td>\n",
+ " <td id=\"T_168ea_row1_col99\" class=\"data row1 col99\" >-0.511806</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row2\" class=\"row_heading level2 row2\" >2</th>\n",
+ " <td id=\"T_168ea_row2_col0\" class=\"data row2 col0\" >0.011470</td>\n",
+ " <td id=\"T_168ea_row2_col1\" class=\"data row2 col1\" >-0.036104</td>\n",
+ " <td id=\"T_168ea_row2_col2\" class=\"data row2 col2\" >1.399603</td>\n",
+ " <td id=\"T_168ea_row2_col3\" class=\"data row2 col3\" >-0.418176</td>\n",
+ " <td id=\"T_168ea_row2_col4\" class=\"data row2 col4\" >-0.412229</td>\n",
+ " <td id=\"T_168ea_row2_col5\" class=\"data row2 col5\" >-1.234783</td>\n",
+ " <td id=\"T_168ea_row2_col6\" class=\"data row2 col6\" >-1.121500</td>\n",
+ " <td id=\"T_168ea_row2_col7\" class=\"data row2 col7\" >1.196478</td>\n",
+ " <td id=\"T_168ea_row2_col8\" class=\"data row2 col8\" >-0.569522</td>\n",
+ " <td id=\"T_168ea_row2_col9\" class=\"data row2 col9\" >0.422022</td>\n",
+ " <td id=\"T_168ea_row2_col10\" class=\"data row2 col10\" >-0.220484</td>\n",
+ " <td id=\"T_168ea_row2_col11\" class=\"data row2 col11\" >0.804338</td>\n",
+ " <td id=\"T_168ea_row2_col12\" class=\"data row2 col12\" >2.892667</td>\n",
+ " <td id=\"T_168ea_row2_col13\" class=\"data row2 col13\" >-0.511055</td>\n",
+ " <td id=\"T_168ea_row2_col14\" class=\"data row2 col14\" >-0.168722</td>\n",
+ " <td id=\"T_168ea_row2_col15\" class=\"data row2 col15\" >-1.477996</td>\n",
+ " <td id=\"T_168ea_row2_col16\" class=\"data row2 col16\" >-1.969917</td>\n",
+ " <td id=\"T_168ea_row2_col17\" class=\"data row2 col17\" >0.471354</td>\n",
+ " <td id=\"T_168ea_row2_col18\" class=\"data row2 col18\" >1.698548</td>\n",
+ " <td id=\"T_168ea_row2_col19\" class=\"data row2 col19\" >0.137105</td>\n",
+ " <td id=\"T_168ea_row2_col20\" class=\"data row2 col20\" >-0.762052</td>\n",
+ " <td id=\"T_168ea_row2_col21\" class=\"data row2 col21\" >0.199379</td>\n",
+ " <td id=\"T_168ea_row2_col22\" class=\"data row2 col22\" >-0.964346</td>\n",
+ " <td id=\"T_168ea_row2_col23\" class=\"data row2 col23\" >-0.256692</td>\n",
+ " <td id=\"T_168ea_row2_col24\" class=\"data row2 col24\" >1.265275</td>\n",
+ " <td id=\"T_168ea_row2_col25\" class=\"data row2 col25\" >0.848762</td>\n",
+ " <td id=\"T_168ea_row2_col26\" class=\"data row2 col26\" >-0.784161</td>\n",
+ " <td id=\"T_168ea_row2_col27\" class=\"data row2 col27\" >1.863776</td>\n",
+ " <td id=\"T_168ea_row2_col28\" class=\"data row2 col28\" >-0.355569</td>\n",
+ " <td id=\"T_168ea_row2_col29\" class=\"data row2 col29\" >0.854552</td>\n",
+ " <td id=\"T_168ea_row2_col30\" class=\"data row2 col30\" >0.768061</td>\n",
+ " <td id=\"T_168ea_row2_col31\" class=\"data row2 col31\" >-2.075718</td>\n",
+ " <td id=\"T_168ea_row2_col32\" class=\"data row2 col32\" >-2.501069</td>\n",
+ " <td id=\"T_168ea_row2_col33\" class=\"data row2 col33\" >1.109868</td>\n",
+ " <td id=\"T_168ea_row2_col34\" class=\"data row2 col34\" >0.957545</td>\n",
+ " <td id=\"T_168ea_row2_col35\" class=\"data row2 col35\" >-0.683276</td>\n",
+ " <td id=\"T_168ea_row2_col36\" class=\"data row2 col36\" >0.307764</td>\n",
+ " <td id=\"T_168ea_row2_col37\" class=\"data row2 col37\" >0.733073</td>\n",
+ " <td id=\"T_168ea_row2_col38\" class=\"data row2 col38\" >1.706250</td>\n",
+ " <td id=\"T_168ea_row2_col39\" class=\"data row2 col39\" >-1.118091</td>\n",
+ " <td id=\"T_168ea_row2_col40\" class=\"data row2 col40\" >0.374961</td>\n",
+ " <td id=\"T_168ea_row2_col41\" class=\"data row2 col41\" >-1.414503</td>\n",
+ " <td id=\"T_168ea_row2_col42\" class=\"data row2 col42\" >-0.524183</td>\n",
+ " <td id=\"T_168ea_row2_col43\" class=\"data row2 col43\" >-1.662696</td>\n",
+ " <td id=\"T_168ea_row2_col44\" class=\"data row2 col44\" >0.687921</td>\n",
+ " <td id=\"T_168ea_row2_col45\" class=\"data row2 col45\" >0.521732</td>\n",
+ " <td id=\"T_168ea_row2_col46\" class=\"data row2 col46\" >1.451396</td>\n",
+ " <td id=\"T_168ea_row2_col47\" class=\"data row2 col47\" >-0.833491</td>\n",
+ " <td id=\"T_168ea_row2_col48\" class=\"data row2 col48\" >-0.362796</td>\n",
+ " <td id=\"T_168ea_row2_col49\" class=\"data row2 col49\" >-1.174444</td>\n",
+ " <td id=\"T_168ea_row2_col50\" class=\"data row2 col50\" >-0.813893</td>\n",
+ " <td id=\"T_168ea_row2_col51\" class=\"data row2 col51\" >-0.893220</td>\n",
+ " <td id=\"T_168ea_row2_col52\" class=\"data row2 col52\" >0.770743</td>\n",
+ " <td id=\"T_168ea_row2_col53\" class=\"data row2 col53\" >1.156647</td>\n",
+ " <td id=\"T_168ea_row2_col54\" class=\"data row2 col54\" >-0.647444</td>\n",
+ " <td id=\"T_168ea_row2_col55\" class=\"data row2 col55\" >0.125929</td>\n",
+ " <td id=\"T_168ea_row2_col56\" class=\"data row2 col56\" >0.513600</td>\n",
+ " <td id=\"T_168ea_row2_col57\" class=\"data row2 col57\" >-0.537874</td>\n",
+ " <td id=\"T_168ea_row2_col58\" class=\"data row2 col58\" >1.992052</td>\n",
+ " <td id=\"T_168ea_row2_col59\" class=\"data row2 col59\" >-1.946584</td>\n",
+ " <td id=\"T_168ea_row2_col60\" class=\"data row2 col60\" >-0.104759</td>\n",
+ " <td id=\"T_168ea_row2_col61\" class=\"data row2 col61\" >0.484779</td>\n",
+ " <td id=\"T_168ea_row2_col62\" class=\"data row2 col62\" >-0.290936</td>\n",
+ " <td id=\"T_168ea_row2_col63\" class=\"data row2 col63\" >-0.441075</td>\n",
+ " <td id=\"T_168ea_row2_col64\" class=\"data row2 col64\" >0.542993</td>\n",
+ " <td id=\"T_168ea_row2_col65\" class=\"data row2 col65\" >-1.050038</td>\n",
+ " <td id=\"T_168ea_row2_col66\" class=\"data row2 col66\" >1.630482</td>\n",
+ " <td id=\"T_168ea_row2_col67\" class=\"data row2 col67\" >0.239771</td>\n",
+ " <td id=\"T_168ea_row2_col68\" class=\"data row2 col68\" >-1.177310</td>\n",
+ " <td id=\"T_168ea_row2_col69\" class=\"data row2 col69\" >0.464804</td>\n",
+ " <td id=\"T_168ea_row2_col70\" class=\"data row2 col70\" >-0.966995</td>\n",
+ " <td id=\"T_168ea_row2_col71\" class=\"data row2 col71\" >0.646086</td>\n",
+ " <td id=\"T_168ea_row2_col72\" class=\"data row2 col72\" >0.486899</td>\n",
+ " <td id=\"T_168ea_row2_col73\" class=\"data row2 col73\" >1.022196</td>\n",
+ " <td id=\"T_168ea_row2_col74\" class=\"data row2 col74\" >-2.267827</td>\n",
+ " <td id=\"T_168ea_row2_col75\" class=\"data row2 col75\" >-1.229616</td>\n",
+ " <td id=\"T_168ea_row2_col76\" class=\"data row2 col76\" >1.313805</td>\n",
+ " <td id=\"T_168ea_row2_col77\" class=\"data row2 col77\" >1.073292</td>\n",
+ " <td id=\"T_168ea_row2_col78\" class=\"data row2 col78\" >2.324940</td>\n",
+ " <td id=\"T_168ea_row2_col79\" class=\"data row2 col79\" >-0.542720</td>\n",
+ " <td id=\"T_168ea_row2_col80\" class=\"data row2 col80\" >-1.504292</td>\n",
+ " <td id=\"T_168ea_row2_col81\" class=\"data row2 col81\" >0.777643</td>\n",
+ " <td id=\"T_168ea_row2_col82\" class=\"data row2 col82\" >-0.618553</td>\n",
+ " <td id=\"T_168ea_row2_col83\" class=\"data row2 col83\" >0.011342</td>\n",
+ " <td id=\"T_168ea_row2_col84\" class=\"data row2 col84\" >1.385062</td>\n",
+ " <td id=\"T_168ea_row2_col85\" class=\"data row2 col85\" >1.363552</td>\n",
+ " <td id=\"T_168ea_row2_col86\" class=\"data row2 col86\" >-0.549834</td>\n",
+ " <td id=\"T_168ea_row2_col87\" class=\"data row2 col87\" >0.688896</td>\n",
+ " <td id=\"T_168ea_row2_col88\" class=\"data row2 col88\" >1.361288</td>\n",
+ " <td id=\"T_168ea_row2_col89\" class=\"data row2 col89\" >-0.381137</td>\n",
+ " <td id=\"T_168ea_row2_col90\" class=\"data row2 col90\" >0.797812</td>\n",
+ " <td id=\"T_168ea_row2_col91\" class=\"data row2 col91\" >-1.128198</td>\n",
+ " <td id=\"T_168ea_row2_col92\" class=\"data row2 col92\" >0.369208</td>\n",
+ " <td id=\"T_168ea_row2_col93\" class=\"data row2 col93\" >0.540132</td>\n",
+ " <td id=\"T_168ea_row2_col94\" class=\"data row2 col94\" >0.413853</td>\n",
+ " <td id=\"T_168ea_row2_col95\" class=\"data row2 col95\" >-0.200308</td>\n",
+ " <td id=\"T_168ea_row2_col96\" class=\"data row2 col96\" >-0.969126</td>\n",
+ " <td id=\"T_168ea_row2_col97\" class=\"data row2 col97\" >0.981293</td>\n",
+ " <td id=\"T_168ea_row2_col98\" class=\"data row2 col98\" >-0.009783</td>\n",
+ " <td id=\"T_168ea_row2_col99\" class=\"data row2 col99\" >-0.320020</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row3\" class=\"row_heading level2 row3\" >3</th>\n",
+ " <td id=\"T_168ea_row3_col0\" class=\"data row3 col0\" >-0.574816</td>\n",
+ " <td id=\"T_168ea_row3_col1\" class=\"data row3 col1\" >1.419977</td>\n",
+ " <td id=\"T_168ea_row3_col2\" class=\"data row3 col2\" >0.434813</td>\n",
+ " <td id=\"T_168ea_row3_col3\" class=\"data row3 col3\" >-1.101217</td>\n",
+ " <td id=\"T_168ea_row3_col4\" class=\"data row3 col4\" >-1.586275</td>\n",
+ " <td id=\"T_168ea_row3_col5\" class=\"data row3 col5\" >1.979573</td>\n",
+ " <td id=\"T_168ea_row3_col6\" class=\"data row3 col6\" >0.378298</td>\n",
+ " <td id=\"T_168ea_row3_col7\" class=\"data row3 col7\" >0.782326</td>\n",
+ " <td id=\"T_168ea_row3_col8\" class=\"data row3 col8\" >2.178987</td>\n",
+ " <td id=\"T_168ea_row3_col9\" class=\"data row3 col9\" >0.657564</td>\n",
+ " <td id=\"T_168ea_row3_col10\" class=\"data row3 col10\" >0.683774</td>\n",
+ " <td id=\"T_168ea_row3_col11\" class=\"data row3 col11\" >-0.091000</td>\n",
+ " <td id=\"T_168ea_row3_col12\" class=\"data row3 col12\" >-0.059552</td>\n",
+ " <td id=\"T_168ea_row3_col13\" class=\"data row3 col13\" >-0.738908</td>\n",
+ " <td id=\"T_168ea_row3_col14\" class=\"data row3 col14\" >-0.907653</td>\n",
+ " <td id=\"T_168ea_row3_col15\" class=\"data row3 col15\" >-0.701936</td>\n",
+ " <td id=\"T_168ea_row3_col16\" class=\"data row3 col16\" >0.580039</td>\n",
+ " <td id=\"T_168ea_row3_col17\" class=\"data row3 col17\" >-0.618757</td>\n",
+ " <td id=\"T_168ea_row3_col18\" class=\"data row3 col18\" >0.453684</td>\n",
+ " <td id=\"T_168ea_row3_col19\" class=\"data row3 col19\" >1.665382</td>\n",
+ " <td id=\"T_168ea_row3_col20\" class=\"data row3 col20\" >-0.152321</td>\n",
+ " <td id=\"T_168ea_row3_col21\" class=\"data row3 col21\" >0.880077</td>\n",
+ " <td id=\"T_168ea_row3_col22\" class=\"data row3 col22\" >0.571073</td>\n",
+ " <td id=\"T_168ea_row3_col23\" class=\"data row3 col23\" >-0.604736</td>\n",
+ " <td id=\"T_168ea_row3_col24\" class=\"data row3 col24\" >0.532359</td>\n",
+ " <td id=\"T_168ea_row3_col25\" class=\"data row3 col25\" >0.515031</td>\n",
+ " <td id=\"T_168ea_row3_col26\" class=\"data row3 col26\" >-0.959844</td>\n",
+ " <td id=\"T_168ea_row3_col27\" class=\"data row3 col27\" >-0.887184</td>\n",
+ " <td id=\"T_168ea_row3_col28\" class=\"data row3 col28\" >0.435781</td>\n",
+ " <td id=\"T_168ea_row3_col29\" class=\"data row3 col29\" >0.862093</td>\n",
+ " <td id=\"T_168ea_row3_col30\" class=\"data row3 col30\" >-0.956321</td>\n",
+ " <td id=\"T_168ea_row3_col31\" class=\"data row3 col31\" >-0.625909</td>\n",
+ " <td id=\"T_168ea_row3_col32\" class=\"data row3 col32\" >0.194472</td>\n",
+ " <td id=\"T_168ea_row3_col33\" class=\"data row3 col33\" >0.442490</td>\n",
+ " <td id=\"T_168ea_row3_col34\" class=\"data row3 col34\" >0.526503</td>\n",
+ " <td id=\"T_168ea_row3_col35\" class=\"data row3 col35\" >-0.215274</td>\n",
+ " <td id=\"T_168ea_row3_col36\" class=\"data row3 col36\" >0.090711</td>\n",
+ " <td id=\"T_168ea_row3_col37\" class=\"data row3 col37\" >0.932592</td>\n",
+ " <td id=\"T_168ea_row3_col38\" class=\"data row3 col38\" >0.811999</td>\n",
+ " <td id=\"T_168ea_row3_col39\" class=\"data row3 col39\" >-2.497026</td>\n",
+ " <td id=\"T_168ea_row3_col40\" class=\"data row3 col40\" >0.631545</td>\n",
+ " <td id=\"T_168ea_row3_col41\" class=\"data row3 col41\" >0.321418</td>\n",
+ " <td id=\"T_168ea_row3_col42\" class=\"data row3 col42\" >-0.425549</td>\n",
+ " <td id=\"T_168ea_row3_col43\" class=\"data row3 col43\" >-1.078832</td>\n",
+ " <td id=\"T_168ea_row3_col44\" class=\"data row3 col44\" >0.753444</td>\n",
+ " <td id=\"T_168ea_row3_col45\" class=\"data row3 col45\" >0.199790</td>\n",
+ " <td id=\"T_168ea_row3_col46\" class=\"data row3 col46\" >-0.360526</td>\n",
+ " <td id=\"T_168ea_row3_col47\" class=\"data row3 col47\" >-0.013448</td>\n",
+ " <td id=\"T_168ea_row3_col48\" class=\"data row3 col48\" >-0.819476</td>\n",
+ " <td id=\"T_168ea_row3_col49\" class=\"data row3 col49\" >0.814869</td>\n",
+ " <td id=\"T_168ea_row3_col50\" class=\"data row3 col50\" >0.442118</td>\n",
+ " <td id=\"T_168ea_row3_col51\" class=\"data row3 col51\" >-0.972048</td>\n",
+ " <td id=\"T_168ea_row3_col52\" class=\"data row3 col52\" >-0.060603</td>\n",
+ " <td id=\"T_168ea_row3_col53\" class=\"data row3 col53\" >-2.349825</td>\n",
+ " <td id=\"T_168ea_row3_col54\" class=\"data row3 col54\" >1.265445</td>\n",
+ " <td id=\"T_168ea_row3_col55\" class=\"data row3 col55\" >-0.573257</td>\n",
+ " <td id=\"T_168ea_row3_col56\" class=\"data row3 col56\" >0.429124</td>\n",
+ " <td id=\"T_168ea_row3_col57\" class=\"data row3 col57\" >1.049783</td>\n",
+ " <td id=\"T_168ea_row3_col58\" class=\"data row3 col58\" >1.954773</td>\n",
+ " <td id=\"T_168ea_row3_col59\" class=\"data row3 col59\" >0.071883</td>\n",
+ " <td id=\"T_168ea_row3_col60\" class=\"data row3 col60\" >-0.094209</td>\n",
+ " <td id=\"T_168ea_row3_col61\" class=\"data row3 col61\" >0.265616</td>\n",
+ " <td id=\"T_168ea_row3_col62\" class=\"data row3 col62\" >0.948318</td>\n",
+ " <td id=\"T_168ea_row3_col63\" class=\"data row3 col63\" >0.331645</td>\n",
+ " <td id=\"T_168ea_row3_col64\" class=\"data row3 col64\" >1.343401</td>\n",
+ " <td id=\"T_168ea_row3_col65\" class=\"data row3 col65\" >-0.167934</td>\n",
+ " <td id=\"T_168ea_row3_col66\" class=\"data row3 col66\" >-1.105252</td>\n",
+ " <td id=\"T_168ea_row3_col67\" class=\"data row3 col67\" >-0.167077</td>\n",
+ " <td id=\"T_168ea_row3_col68\" class=\"data row3 col68\" >-0.096576</td>\n",
+ " <td id=\"T_168ea_row3_col69\" class=\"data row3 col69\" >-0.838161</td>\n",
+ " <td id=\"T_168ea_row3_col70\" class=\"data row3 col70\" >-0.208564</td>\n",
+ " <td id=\"T_168ea_row3_col71\" class=\"data row3 col71\" >0.394534</td>\n",
+ " <td id=\"T_168ea_row3_col72\" class=\"data row3 col72\" >0.762533</td>\n",
+ " <td id=\"T_168ea_row3_col73\" class=\"data row3 col73\" >1.235357</td>\n",
+ " <td id=\"T_168ea_row3_col74\" class=\"data row3 col74\" >-0.207282</td>\n",
+ " <td id=\"T_168ea_row3_col75\" class=\"data row3 col75\" >-0.202946</td>\n",
+ " <td id=\"T_168ea_row3_col76\" class=\"data row3 col76\" >-0.468025</td>\n",
+ " <td id=\"T_168ea_row3_col77\" class=\"data row3 col77\" >0.256944</td>\n",
+ " <td id=\"T_168ea_row3_col78\" class=\"data row3 col78\" >2.587584</td>\n",
+ " <td id=\"T_168ea_row3_col79\" class=\"data row3 col79\" >1.186697</td>\n",
+ " <td id=\"T_168ea_row3_col80\" class=\"data row3 col80\" >-1.031903</td>\n",
+ " <td id=\"T_168ea_row3_col81\" class=\"data row3 col81\" >1.428316</td>\n",
+ " <td id=\"T_168ea_row3_col82\" class=\"data row3 col82\" >0.658899</td>\n",
+ " <td id=\"T_168ea_row3_col83\" class=\"data row3 col83\" >-0.046582</td>\n",
+ " <td id=\"T_168ea_row3_col84\" class=\"data row3 col84\" >-0.075422</td>\n",
+ " <td id=\"T_168ea_row3_col85\" class=\"data row3 col85\" >1.329359</td>\n",
+ " <td id=\"T_168ea_row3_col86\" class=\"data row3 col86\" >-0.684267</td>\n",
+ " <td id=\"T_168ea_row3_col87\" class=\"data row3 col87\" >-1.524182</td>\n",
+ " <td id=\"T_168ea_row3_col88\" class=\"data row3 col88\" >2.014061</td>\n",
+ " <td id=\"T_168ea_row3_col89\" class=\"data row3 col89\" >3.770933</td>\n",
+ " <td id=\"T_168ea_row3_col90\" class=\"data row3 col90\" >0.647353</td>\n",
+ " <td id=\"T_168ea_row3_col91\" class=\"data row3 col91\" >-1.021377</td>\n",
+ " <td id=\"T_168ea_row3_col92\" class=\"data row3 col92\" >-0.345493</td>\n",
+ " <td id=\"T_168ea_row3_col93\" class=\"data row3 col93\" >0.582811</td>\n",
+ " <td id=\"T_168ea_row3_col94\" class=\"data row3 col94\" >0.797812</td>\n",
+ " <td id=\"T_168ea_row3_col95\" class=\"data row3 col95\" >1.326020</td>\n",
+ " <td id=\"T_168ea_row3_col96\" class=\"data row3 col96\" >1.422857</td>\n",
+ " <td id=\"T_168ea_row3_col97\" class=\"data row3 col97\" >-3.077007</td>\n",
+ " <td id=\"T_168ea_row3_col98\" class=\"data row3 col98\" >0.184083</td>\n",
+ " <td id=\"T_168ea_row3_col99\" class=\"data row3 col99\" >1.478935</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level1_row4\" class=\"row_heading level1 row4\" rowspan=\"4\">1</th>\n",
+ " <th id=\"T_168ea_level2_row4\" class=\"row_heading level2 row4\" >0</th>\n",
+ " <td id=\"T_168ea_row4_col0\" class=\"data row4 col0\" >-0.600142</td>\n",
+ " <td id=\"T_168ea_row4_col1\" class=\"data row4 col1\" >1.929561</td>\n",
+ " <td id=\"T_168ea_row4_col2\" class=\"data row4 col2\" >-2.346771</td>\n",
+ " <td id=\"T_168ea_row4_col3\" class=\"data row4 col3\" >-0.669700</td>\n",
+ " <td id=\"T_168ea_row4_col4\" class=\"data row4 col4\" >-1.165258</td>\n",
+ " <td id=\"T_168ea_row4_col5\" class=\"data row4 col5\" >0.814788</td>\n",
+ " <td id=\"T_168ea_row4_col6\" class=\"data row4 col6\" >0.444449</td>\n",
+ " <td id=\"T_168ea_row4_col7\" class=\"data row4 col7\" >-0.576758</td>\n",
+ " <td id=\"T_168ea_row4_col8\" class=\"data row4 col8\" >0.353091</td>\n",
+ " <td id=\"T_168ea_row4_col9\" class=\"data row4 col9\" >0.408893</td>\n",
+ " <td id=\"T_168ea_row4_col10\" class=\"data row4 col10\" >0.091391</td>\n",
+ " <td id=\"T_168ea_row4_col11\" class=\"data row4 col11\" >-2.294389</td>\n",
+ " <td id=\"T_168ea_row4_col12\" class=\"data row4 col12\" >0.485506</td>\n",
+ " <td id=\"T_168ea_row4_col13\" class=\"data row4 col13\" >-0.081304</td>\n",
+ " <td id=\"T_168ea_row4_col14\" class=\"data row4 col14\" >-0.716272</td>\n",
+ " <td id=\"T_168ea_row4_col15\" class=\"data row4 col15\" >-1.648010</td>\n",
+ " <td id=\"T_168ea_row4_col16\" class=\"data row4 col16\" >1.005361</td>\n",
+ " <td id=\"T_168ea_row4_col17\" class=\"data row4 col17\" >-1.489603</td>\n",
+ " <td id=\"T_168ea_row4_col18\" class=\"data row4 col18\" >0.363098</td>\n",
+ " <td id=\"T_168ea_row4_col19\" class=\"data row4 col19\" >0.758602</td>\n",
+ " <td id=\"T_168ea_row4_col20\" class=\"data row4 col20\" >-1.373847</td>\n",
+ " <td id=\"T_168ea_row4_col21\" class=\"data row4 col21\" >-0.972057</td>\n",
+ " <td id=\"T_168ea_row4_col22\" class=\"data row4 col22\" >1.988537</td>\n",
+ " <td id=\"T_168ea_row4_col23\" class=\"data row4 col23\" >0.319829</td>\n",
+ " <td id=\"T_168ea_row4_col24\" class=\"data row4 col24\" >1.169060</td>\n",
+ " <td id=\"T_168ea_row4_col25\" class=\"data row4 col25\" >0.146585</td>\n",
+ " <td id=\"T_168ea_row4_col26\" class=\"data row4 col26\" >1.030388</td>\n",
+ " <td id=\"T_168ea_row4_col27\" class=\"data row4 col27\" >1.165984</td>\n",
+ " <td id=\"T_168ea_row4_col28\" class=\"data row4 col28\" >1.369563</td>\n",
+ " <td id=\"T_168ea_row4_col29\" class=\"data row4 col29\" >0.730984</td>\n",
+ " <td id=\"T_168ea_row4_col30\" class=\"data row4 col30\" >-1.383696</td>\n",
+ " <td id=\"T_168ea_row4_col31\" class=\"data row4 col31\" >-0.515189</td>\n",
+ " <td id=\"T_168ea_row4_col32\" class=\"data row4 col32\" >-0.808927</td>\n",
+ " <td id=\"T_168ea_row4_col33\" class=\"data row4 col33\" >-1.174651</td>\n",
+ " <td id=\"T_168ea_row4_col34\" class=\"data row4 col34\" >-1.631502</td>\n",
+ " <td id=\"T_168ea_row4_col35\" class=\"data row4 col35\" >-1.123414</td>\n",
+ " <td id=\"T_168ea_row4_col36\" class=\"data row4 col36\" >-0.478155</td>\n",
+ " <td id=\"T_168ea_row4_col37\" class=\"data row4 col37\" >-1.583067</td>\n",
+ " <td id=\"T_168ea_row4_col38\" class=\"data row4 col38\" >1.419074</td>\n",
+ " <td id=\"T_168ea_row4_col39\" class=\"data row4 col39\" >1.668777</td>\n",
+ " <td id=\"T_168ea_row4_col40\" class=\"data row4 col40\" >1.567517</td>\n",
+ " <td id=\"T_168ea_row4_col41\" class=\"data row4 col41\" >0.222103</td>\n",
+ " <td id=\"T_168ea_row4_col42\" class=\"data row4 col42\" >-0.336040</td>\n",
+ " <td id=\"T_168ea_row4_col43\" class=\"data row4 col43\" >-1.352064</td>\n",
+ " <td id=\"T_168ea_row4_col44\" class=\"data row4 col44\" >0.251032</td>\n",
+ " <td id=\"T_168ea_row4_col45\" class=\"data row4 col45\" >-0.401695</td>\n",
+ " <td id=\"T_168ea_row4_col46\" class=\"data row4 col46\" >0.268413</td>\n",
+ " <td id=\"T_168ea_row4_col47\" class=\"data row4 col47\" >-0.012299</td>\n",
+ " <td id=\"T_168ea_row4_col48\" class=\"data row4 col48\" >-0.918953</td>\n",
+ " <td id=\"T_168ea_row4_col49\" class=\"data row4 col49\" >2.921208</td>\n",
+ " <td id=\"T_168ea_row4_col50\" class=\"data row4 col50\" >-0.581588</td>\n",
+ " <td id=\"T_168ea_row4_col51\" class=\"data row4 col51\" >0.672848</td>\n",
+ " <td id=\"T_168ea_row4_col52\" class=\"data row4 col52\" >1.251136</td>\n",
+ " <td id=\"T_168ea_row4_col53\" class=\"data row4 col53\" >1.382263</td>\n",
+ " <td id=\"T_168ea_row4_col54\" class=\"data row4 col54\" >1.429897</td>\n",
+ " <td id=\"T_168ea_row4_col55\" class=\"data row4 col55\" >1.290990</td>\n",
+ " <td id=\"T_168ea_row4_col56\" class=\"data row4 col56\" >-1.272673</td>\n",
+ " <td id=\"T_168ea_row4_col57\" class=\"data row4 col57\" >-0.308611</td>\n",
+ " <td id=\"T_168ea_row4_col58\" class=\"data row4 col58\" >-0.422988</td>\n",
+ " <td id=\"T_168ea_row4_col59\" class=\"data row4 col59\" >-0.675642</td>\n",
+ " <td id=\"T_168ea_row4_col60\" class=\"data row4 col60\" >0.874441</td>\n",
+ " <td id=\"T_168ea_row4_col61\" class=\"data row4 col61\" >1.305736</td>\n",
+ " <td id=\"T_168ea_row4_col62\" class=\"data row4 col62\" >-0.262585</td>\n",
+ " <td id=\"T_168ea_row4_col63\" class=\"data row4 col63\" >-1.099395</td>\n",
+ " <td id=\"T_168ea_row4_col64\" class=\"data row4 col64\" >-0.667101</td>\n",
+ " <td id=\"T_168ea_row4_col65\" class=\"data row4 col65\" >-0.646737</td>\n",
+ " <td id=\"T_168ea_row4_col66\" class=\"data row4 col66\" >-0.556338</td>\n",
+ " <td id=\"T_168ea_row4_col67\" class=\"data row4 col67\" >-0.196591</td>\n",
+ " <td id=\"T_168ea_row4_col68\" class=\"data row4 col68\" >0.119306</td>\n",
+ " <td id=\"T_168ea_row4_col69\" class=\"data row4 col69\" >-0.266455</td>\n",
+ " <td id=\"T_168ea_row4_col70\" class=\"data row4 col70\" >-0.524267</td>\n",
+ " <td id=\"T_168ea_row4_col71\" class=\"data row4 col71\" >2.650951</td>\n",
+ " <td id=\"T_168ea_row4_col72\" class=\"data row4 col72\" >0.097318</td>\n",
+ " <td id=\"T_168ea_row4_col73\" class=\"data row4 col73\" >-0.974697</td>\n",
+ " <td id=\"T_168ea_row4_col74\" class=\"data row4 col74\" >0.189964</td>\n",
+ " <td id=\"T_168ea_row4_col75\" class=\"data row4 col75\" >1.141155</td>\n",
+ " <td id=\"T_168ea_row4_col76\" class=\"data row4 col76\" >-0.064434</td>\n",
+ " <td id=\"T_168ea_row4_col77\" class=\"data row4 col77\" >1.104971</td>\n",
+ " <td id=\"T_168ea_row4_col78\" class=\"data row4 col78\" >-1.508908</td>\n",
+ " <td id=\"T_168ea_row4_col79\" class=\"data row4 col79\" >-0.031833</td>\n",
+ " <td id=\"T_168ea_row4_col80\" class=\"data row4 col80\" >0.803919</td>\n",
+ " <td id=\"T_168ea_row4_col81\" class=\"data row4 col81\" >-0.659221</td>\n",
+ " <td id=\"T_168ea_row4_col82\" class=\"data row4 col82\" >0.939145</td>\n",
+ " <td id=\"T_168ea_row4_col83\" class=\"data row4 col83\" >0.214041</td>\n",
+ " <td id=\"T_168ea_row4_col84\" class=\"data row4 col84\" >-0.531805</td>\n",
+ " <td id=\"T_168ea_row4_col85\" class=\"data row4 col85\" >0.956060</td>\n",
+ " <td id=\"T_168ea_row4_col86\" class=\"data row4 col86\" >0.249328</td>\n",
+ " <td id=\"T_168ea_row4_col87\" class=\"data row4 col87\" >0.637903</td>\n",
+ " <td id=\"T_168ea_row4_col88\" class=\"data row4 col88\" >-0.510158</td>\n",
+ " <td id=\"T_168ea_row4_col89\" class=\"data row4 col89\" >1.850287</td>\n",
+ " <td id=\"T_168ea_row4_col90\" class=\"data row4 col90\" >-0.348407</td>\n",
+ " <td id=\"T_168ea_row4_col91\" class=\"data row4 col91\" >2.001376</td>\n",
+ " <td id=\"T_168ea_row4_col92\" class=\"data row4 col92\" >-0.389643</td>\n",
+ " <td id=\"T_168ea_row4_col93\" class=\"data row4 col93\" >-0.024786</td>\n",
+ " <td id=\"T_168ea_row4_col94\" class=\"data row4 col94\" >-0.470973</td>\n",
+ " <td id=\"T_168ea_row4_col95\" class=\"data row4 col95\" >0.869339</td>\n",
+ " <td id=\"T_168ea_row4_col96\" class=\"data row4 col96\" >0.170667</td>\n",
+ " <td id=\"T_168ea_row4_col97\" class=\"data row4 col97\" >0.598062</td>\n",
+ " <td id=\"T_168ea_row4_col98\" class=\"data row4 col98\" >1.217262</td>\n",
+ " <td id=\"T_168ea_row4_col99\" class=\"data row4 col99\" >1.274013</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row5\" class=\"row_heading level2 row5\" >1</th>\n",
+ " <td id=\"T_168ea_row5_col0\" class=\"data row5 col0\" >-0.389981</td>\n",
+ " <td id=\"T_168ea_row5_col1\" class=\"data row5 col1\" >-0.752441</td>\n",
+ " <td id=\"T_168ea_row5_col2\" class=\"data row5 col2\" >-0.734871</td>\n",
+ " <td id=\"T_168ea_row5_col3\" class=\"data row5 col3\" >3.517318</td>\n",
+ " <td id=\"T_168ea_row5_col4\" class=\"data row5 col4\" >-1.173559</td>\n",
+ " <td id=\"T_168ea_row5_col5\" class=\"data row5 col5\" >-0.004956</td>\n",
+ " <td id=\"T_168ea_row5_col6\" class=\"data row5 col6\" >0.145419</td>\n",
+ " <td id=\"T_168ea_row5_col7\" class=\"data row5 col7\" >2.151368</td>\n",
+ " <td id=\"T_168ea_row5_col8\" class=\"data row5 col8\" >-3.086037</td>\n",
+ " <td id=\"T_168ea_row5_col9\" class=\"data row5 col9\" >-1.569139</td>\n",
+ " <td id=\"T_168ea_row5_col10\" class=\"data row5 col10\" >1.449784</td>\n",
+ " <td id=\"T_168ea_row5_col11\" class=\"data row5 col11\" >-0.868951</td>\n",
+ " <td id=\"T_168ea_row5_col12\" class=\"data row5 col12\" >-1.687716</td>\n",
+ " <td id=\"T_168ea_row5_col13\" class=\"data row5 col13\" >-0.994401</td>\n",
+ " <td id=\"T_168ea_row5_col14\" class=\"data row5 col14\" >1.153266</td>\n",
+ " <td id=\"T_168ea_row5_col15\" class=\"data row5 col15\" >1.803045</td>\n",
+ " <td id=\"T_168ea_row5_col16\" class=\"data row5 col16\" >-0.819059</td>\n",
+ " <td id=\"T_168ea_row5_col17\" class=\"data row5 col17\" >0.847970</td>\n",
+ " <td id=\"T_168ea_row5_col18\" class=\"data row5 col18\" >0.227102</td>\n",
+ " <td id=\"T_168ea_row5_col19\" class=\"data row5 col19\" >-0.500762</td>\n",
+ " <td id=\"T_168ea_row5_col20\" class=\"data row5 col20\" >0.868210</td>\n",
+ " <td id=\"T_168ea_row5_col21\" class=\"data row5 col21\" >1.823540</td>\n",
+ " <td id=\"T_168ea_row5_col22\" class=\"data row5 col22\" >1.161007</td>\n",
+ " <td id=\"T_168ea_row5_col23\" class=\"data row5 col23\" >-0.307606</td>\n",
+ " <td id=\"T_168ea_row5_col24\" class=\"data row5 col24\" >-0.713416</td>\n",
+ " <td id=\"T_168ea_row5_col25\" class=\"data row5 col25\" >0.363560</td>\n",
+ " <td id=\"T_168ea_row5_col26\" class=\"data row5 col26\" >-0.822162</td>\n",
+ " <td id=\"T_168ea_row5_col27\" class=\"data row5 col27\" >2.427681</td>\n",
+ " <td id=\"T_168ea_row5_col28\" class=\"data row5 col28\" >-0.129537</td>\n",
+ " <td id=\"T_168ea_row5_col29\" class=\"data row5 col29\" >-0.078716</td>\n",
+ " <td id=\"T_168ea_row5_col30\" class=\"data row5 col30\" >1.345644</td>\n",
+ " <td id=\"T_168ea_row5_col31\" class=\"data row5 col31\" >-1.286094</td>\n",
+ " <td id=\"T_168ea_row5_col32\" class=\"data row5 col32\" >0.237242</td>\n",
+ " <td id=\"T_168ea_row5_col33\" class=\"data row5 col33\" >-0.136056</td>\n",
+ " <td id=\"T_168ea_row5_col34\" class=\"data row5 col34\" >0.596664</td>\n",
+ " <td id=\"T_168ea_row5_col35\" class=\"data row5 col35\" >-1.412381</td>\n",
+ " <td id=\"T_168ea_row5_col36\" class=\"data row5 col36\" >1.206341</td>\n",
+ " <td id=\"T_168ea_row5_col37\" class=\"data row5 col37\" >0.299860</td>\n",
+ " <td id=\"T_168ea_row5_col38\" class=\"data row5 col38\" >0.705238</td>\n",
+ " <td id=\"T_168ea_row5_col39\" class=\"data row5 col39\" >0.142412</td>\n",
+ " <td id=\"T_168ea_row5_col40\" class=\"data row5 col40\" >-1.059382</td>\n",
+ " <td id=\"T_168ea_row5_col41\" class=\"data row5 col41\" >0.833468</td>\n",
+ " <td id=\"T_168ea_row5_col42\" class=\"data row5 col42\" >1.060015</td>\n",
+ " <td id=\"T_168ea_row5_col43\" class=\"data row5 col43\" >-0.527045</td>\n",
+ " <td id=\"T_168ea_row5_col44\" class=\"data row5 col44\" >-1.135732</td>\n",
+ " <td id=\"T_168ea_row5_col45\" class=\"data row5 col45\" >-1.140983</td>\n",
+ " <td id=\"T_168ea_row5_col46\" class=\"data row5 col46\" >-0.779540</td>\n",
+ " <td id=\"T_168ea_row5_col47\" class=\"data row5 col47\" >-0.640875</td>\n",
+ " <td id=\"T_168ea_row5_col48\" class=\"data row5 col48\" >-1.217196</td>\n",
+ " <td id=\"T_168ea_row5_col49\" class=\"data row5 col49\" >-1.675663</td>\n",
+ " <td id=\"T_168ea_row5_col50\" class=\"data row5 col50\" >0.241263</td>\n",
+ " <td id=\"T_168ea_row5_col51\" class=\"data row5 col51\" >-0.273322</td>\n",
+ " <td id=\"T_168ea_row5_col52\" class=\"data row5 col52\" >-1.697936</td>\n",
+ " <td id=\"T_168ea_row5_col53\" class=\"data row5 col53\" >-0.594943</td>\n",
+ " <td id=\"T_168ea_row5_col54\" class=\"data row5 col54\" >0.101154</td>\n",
+ " <td id=\"T_168ea_row5_col55\" class=\"data row5 col55\" >1.391735</td>\n",
+ " <td id=\"T_168ea_row5_col56\" class=\"data row5 col56\" >-0.426953</td>\n",
+ " <td id=\"T_168ea_row5_col57\" class=\"data row5 col57\" >1.008344</td>\n",
+ " <td id=\"T_168ea_row5_col58\" class=\"data row5 col58\" >-0.818577</td>\n",
+ " <td id=\"T_168ea_row5_col59\" class=\"data row5 col59\" >1.924570</td>\n",
+ " <td id=\"T_168ea_row5_col60\" class=\"data row5 col60\" >-0.578900</td>\n",
+ " <td id=\"T_168ea_row5_col61\" class=\"data row5 col61\" >-0.457395</td>\n",
+ " <td id=\"T_168ea_row5_col62\" class=\"data row5 col62\" >-1.096705</td>\n",
+ " <td id=\"T_168ea_row5_col63\" class=\"data row5 col63\" >0.418522</td>\n",
+ " <td id=\"T_168ea_row5_col64\" class=\"data row5 col64\" >-0.155623</td>\n",
+ " <td id=\"T_168ea_row5_col65\" class=\"data row5 col65\" >0.169706</td>\n",
+ " <td id=\"T_168ea_row5_col66\" class=\"data row5 col66\" >-2.533706</td>\n",
+ " <td id=\"T_168ea_row5_col67\" class=\"data row5 col67\" >0.018904</td>\n",
+ " <td id=\"T_168ea_row5_col68\" class=\"data row5 col68\" >1.434160</td>\n",
+ " <td id=\"T_168ea_row5_col69\" class=\"data row5 col69\" >0.744095</td>\n",
+ " <td id=\"T_168ea_row5_col70\" class=\"data row5 col70\" >0.647626</td>\n",
+ " <td id=\"T_168ea_row5_col71\" class=\"data row5 col71\" >-0.770309</td>\n",
+ " <td id=\"T_168ea_row5_col72\" class=\"data row5 col72\" >2.329141</td>\n",
+ " <td id=\"T_168ea_row5_col73\" class=\"data row5 col73\" >-0.141547</td>\n",
+ " <td id=\"T_168ea_row5_col74\" class=\"data row5 col74\" >-1.761594</td>\n",
+ " <td id=\"T_168ea_row5_col75\" class=\"data row5 col75\" >0.702091</td>\n",
+ " <td id=\"T_168ea_row5_col76\" class=\"data row5 col76\" >-1.531450</td>\n",
+ " <td id=\"T_168ea_row5_col77\" class=\"data row5 col77\" >-0.788427</td>\n",
+ " <td id=\"T_168ea_row5_col78\" class=\"data row5 col78\" >-0.184622</td>\n",
+ " <td id=\"T_168ea_row5_col79\" class=\"data row5 col79\" >-1.942321</td>\n",
+ " <td id=\"T_168ea_row5_col80\" class=\"data row5 col80\" >1.530113</td>\n",
+ " <td id=\"T_168ea_row5_col81\" class=\"data row5 col81\" >0.503406</td>\n",
+ " <td id=\"T_168ea_row5_col82\" class=\"data row5 col82\" >1.105845</td>\n",
+ " <td id=\"T_168ea_row5_col83\" class=\"data row5 col83\" >-0.935120</td>\n",
+ " <td id=\"T_168ea_row5_col84\" class=\"data row5 col84\" >-1.115483</td>\n",
+ " <td id=\"T_168ea_row5_col85\" class=\"data row5 col85\" >-2.249762</td>\n",
+ " <td id=\"T_168ea_row5_col86\" class=\"data row5 col86\" >1.307135</td>\n",
+ " <td id=\"T_168ea_row5_col87\" class=\"data row5 col87\" >0.788412</td>\n",
+ " <td id=\"T_168ea_row5_col88\" class=\"data row5 col88\" >-0.441091</td>\n",
+ " <td id=\"T_168ea_row5_col89\" class=\"data row5 col89\" >0.073561</td>\n",
+ " <td id=\"T_168ea_row5_col90\" class=\"data row5 col90\" >0.812101</td>\n",
+ " <td id=\"T_168ea_row5_col91\" class=\"data row5 col91\" >-0.916146</td>\n",
+ " <td id=\"T_168ea_row5_col92\" class=\"data row5 col92\" >1.573714</td>\n",
+ " <td id=\"T_168ea_row5_col93\" class=\"data row5 col93\" >-0.309508</td>\n",
+ " <td id=\"T_168ea_row5_col94\" class=\"data row5 col94\" >0.499987</td>\n",
+ " <td id=\"T_168ea_row5_col95\" class=\"data row5 col95\" >0.187594</td>\n",
+ " <td id=\"T_168ea_row5_col96\" class=\"data row5 col96\" >0.558913</td>\n",
+ " <td id=\"T_168ea_row5_col97\" class=\"data row5 col97\" >0.903246</td>\n",
+ " <td id=\"T_168ea_row5_col98\" class=\"data row5 col98\" >0.317901</td>\n",
+ " <td id=\"T_168ea_row5_col99\" class=\"data row5 col99\" >-0.809797</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row6\" class=\"row_heading level2 row6\" >2</th>\n",
+ " <td id=\"T_168ea_row6_col0\" class=\"data row6 col0\" >1.128248</td>\n",
+ " <td id=\"T_168ea_row6_col1\" class=\"data row6 col1\" >1.516826</td>\n",
+ " <td id=\"T_168ea_row6_col2\" class=\"data row6 col2\" >-0.186735</td>\n",
+ " <td id=\"T_168ea_row6_col3\" class=\"data row6 col3\" >-0.668157</td>\n",
+ " <td id=\"T_168ea_row6_col4\" class=\"data row6 col4\" >1.132259</td>\n",
+ " <td id=\"T_168ea_row6_col5\" class=\"data row6 col5\" >-0.246648</td>\n",
+ " <td id=\"T_168ea_row6_col6\" class=\"data row6 col6\" >-0.855167</td>\n",
+ " <td id=\"T_168ea_row6_col7\" class=\"data row6 col7\" >0.732283</td>\n",
+ " <td id=\"T_168ea_row6_col8\" class=\"data row6 col8\" >0.931802</td>\n",
+ " <td id=\"T_168ea_row6_col9\" class=\"data row6 col9\" >1.318684</td>\n",
+ " <td id=\"T_168ea_row6_col10\" class=\"data row6 col10\" >-1.198418</td>\n",
+ " <td id=\"T_168ea_row6_col11\" class=\"data row6 col11\" >-1.149318</td>\n",
+ " <td id=\"T_168ea_row6_col12\" class=\"data row6 col12\" >0.586321</td>\n",
+ " <td id=\"T_168ea_row6_col13\" class=\"data row6 col13\" >-1.171937</td>\n",
+ " <td id=\"T_168ea_row6_col14\" class=\"data row6 col14\" >-0.607731</td>\n",
+ " <td id=\"T_168ea_row6_col15\" class=\"data row6 col15\" >2.753747</td>\n",
+ " <td id=\"T_168ea_row6_col16\" class=\"data row6 col16\" >1.479287</td>\n",
+ " <td id=\"T_168ea_row6_col17\" class=\"data row6 col17\" >-1.136365</td>\n",
+ " <td id=\"T_168ea_row6_col18\" class=\"data row6 col18\" >-0.020485</td>\n",
+ " <td id=\"T_168ea_row6_col19\" class=\"data row6 col19\" >0.320444</td>\n",
+ " <td id=\"T_168ea_row6_col20\" class=\"data row6 col20\" >-1.955755</td>\n",
+ " <td id=\"T_168ea_row6_col21\" class=\"data row6 col21\" >0.660402</td>\n",
+ " <td id=\"T_168ea_row6_col22\" class=\"data row6 col22\" >-1.545371</td>\n",
+ " <td id=\"T_168ea_row6_col23\" class=\"data row6 col23\" >0.200519</td>\n",
+ " <td id=\"T_168ea_row6_col24\" class=\"data row6 col24\" >-0.017263</td>\n",
+ " <td id=\"T_168ea_row6_col25\" class=\"data row6 col25\" >1.634686</td>\n",
+ " <td id=\"T_168ea_row6_col26\" class=\"data row6 col26\" >0.599246</td>\n",
+ " <td id=\"T_168ea_row6_col27\" class=\"data row6 col27\" >0.462989</td>\n",
+ " <td id=\"T_168ea_row6_col28\" class=\"data row6 col28\" >0.023721</td>\n",
+ " <td id=\"T_168ea_row6_col29\" class=\"data row6 col29\" >0.225546</td>\n",
+ " <td id=\"T_168ea_row6_col30\" class=\"data row6 col30\" >0.170972</td>\n",
+ " <td id=\"T_168ea_row6_col31\" class=\"data row6 col31\" >-0.027496</td>\n",
+ " <td id=\"T_168ea_row6_col32\" class=\"data row6 col32\" >-0.061233</td>\n",
+ " <td id=\"T_168ea_row6_col33\" class=\"data row6 col33\" >-0.566411</td>\n",
+ " <td id=\"T_168ea_row6_col34\" class=\"data row6 col34\" >-0.669567</td>\n",
+ " <td id=\"T_168ea_row6_col35\" class=\"data row6 col35\" >0.601618</td>\n",
+ " <td id=\"T_168ea_row6_col36\" class=\"data row6 col36\" >0.503656</td>\n",
+ " <td id=\"T_168ea_row6_col37\" class=\"data row6 col37\" >-0.678253</td>\n",
+ " <td id=\"T_168ea_row6_col38\" class=\"data row6 col38\" >-2.907108</td>\n",
+ " <td id=\"T_168ea_row6_col39\" class=\"data row6 col39\" >-1.717123</td>\n",
+ " <td id=\"T_168ea_row6_col40\" class=\"data row6 col40\" >0.397631</td>\n",
+ " <td id=\"T_168ea_row6_col41\" class=\"data row6 col41\" >1.300108</td>\n",
+ " <td id=\"T_168ea_row6_col42\" class=\"data row6 col42\" >0.215821</td>\n",
+ " <td id=\"T_168ea_row6_col43\" class=\"data row6 col43\" >-0.593075</td>\n",
+ " <td id=\"T_168ea_row6_col44\" class=\"data row6 col44\" >-0.225944</td>\n",
+ " <td id=\"T_168ea_row6_col45\" class=\"data row6 col45\" >-0.946057</td>\n",
+ " <td id=\"T_168ea_row6_col46\" class=\"data row6 col46\" >1.000308</td>\n",
+ " <td id=\"T_168ea_row6_col47\" class=\"data row6 col47\" >0.393160</td>\n",
+ " <td id=\"T_168ea_row6_col48\" class=\"data row6 col48\" >1.342074</td>\n",
+ " <td id=\"T_168ea_row6_col49\" class=\"data row6 col49\" >-0.370687</td>\n",
+ " <td id=\"T_168ea_row6_col50\" class=\"data row6 col50\" >-0.166413</td>\n",
+ " <td id=\"T_168ea_row6_col51\" class=\"data row6 col51\" >-0.419814</td>\n",
+ " <td id=\"T_168ea_row6_col52\" class=\"data row6 col52\" >-0.255931</td>\n",
+ " <td id=\"T_168ea_row6_col53\" class=\"data row6 col53\" >1.789478</td>\n",
+ " <td id=\"T_168ea_row6_col54\" class=\"data row6 col54\" >0.282378</td>\n",
+ " <td id=\"T_168ea_row6_col55\" class=\"data row6 col55\" >0.742260</td>\n",
+ " <td id=\"T_168ea_row6_col56\" class=\"data row6 col56\" >-0.050498</td>\n",
+ " <td id=\"T_168ea_row6_col57\" class=\"data row6 col57\" >1.415309</td>\n",
+ " <td id=\"T_168ea_row6_col58\" class=\"data row6 col58\" >0.838166</td>\n",
+ " <td id=\"T_168ea_row6_col59\" class=\"data row6 col59\" >-1.400292</td>\n",
+ " <td id=\"T_168ea_row6_col60\" class=\"data row6 col60\" >-0.937976</td>\n",
+ " <td id=\"T_168ea_row6_col61\" class=\"data row6 col61\" >-1.499148</td>\n",
+ " <td id=\"T_168ea_row6_col62\" class=\"data row6 col62\" >0.801859</td>\n",
+ " <td id=\"T_168ea_row6_col63\" class=\"data row6 col63\" >0.224824</td>\n",
+ " <td id=\"T_168ea_row6_col64\" class=\"data row6 col64\" >0.283572</td>\n",
+ " <td id=\"T_168ea_row6_col65\" class=\"data row6 col65\" >0.643703</td>\n",
+ " <td id=\"T_168ea_row6_col66\" class=\"data row6 col66\" >-1.198465</td>\n",
+ " <td id=\"T_168ea_row6_col67\" class=\"data row6 col67\" >0.527206</td>\n",
+ " <td id=\"T_168ea_row6_col68\" class=\"data row6 col68\" >0.215202</td>\n",
+ " <td id=\"T_168ea_row6_col69\" class=\"data row6 col69\" >0.437048</td>\n",
+ " <td id=\"T_168ea_row6_col70\" class=\"data row6 col70\" >1.312868</td>\n",
+ " <td id=\"T_168ea_row6_col71\" class=\"data row6 col71\" >0.741243</td>\n",
+ " <td id=\"T_168ea_row6_col72\" class=\"data row6 col72\" >0.077988</td>\n",
+ " <td id=\"T_168ea_row6_col73\" class=\"data row6 col73\" >0.006123</td>\n",
+ " <td id=\"T_168ea_row6_col74\" class=\"data row6 col74\" >0.190370</td>\n",
+ " <td id=\"T_168ea_row6_col75\" class=\"data row6 col75\" >0.018007</td>\n",
+ " <td id=\"T_168ea_row6_col76\" class=\"data row6 col76\" >-1.026036</td>\n",
+ " <td id=\"T_168ea_row6_col77\" class=\"data row6 col77\" >-2.378430</td>\n",
+ " <td id=\"T_168ea_row6_col78\" class=\"data row6 col78\" >-1.069949</td>\n",
+ " <td id=\"T_168ea_row6_col79\" class=\"data row6 col79\" >0.843822</td>\n",
+ " <td id=\"T_168ea_row6_col80\" class=\"data row6 col80\" >1.289216</td>\n",
+ " <td id=\"T_168ea_row6_col81\" class=\"data row6 col81\" >-1.423369</td>\n",
+ " <td id=\"T_168ea_row6_col82\" class=\"data row6 col82\" >-0.462887</td>\n",
+ " <td id=\"T_168ea_row6_col83\" class=\"data row6 col83\" >0.197330</td>\n",
+ " <td id=\"T_168ea_row6_col84\" class=\"data row6 col84\" >-0.935076</td>\n",
+ " <td id=\"T_168ea_row6_col85\" class=\"data row6 col85\" >0.441271</td>\n",
+ " <td id=\"T_168ea_row6_col86\" class=\"data row6 col86\" >0.414643</td>\n",
+ " <td id=\"T_168ea_row6_col87\" class=\"data row6 col87\" >-0.377887</td>\n",
+ " <td id=\"T_168ea_row6_col88\" class=\"data row6 col88\" >-0.530515</td>\n",
+ " <td id=\"T_168ea_row6_col89\" class=\"data row6 col89\" >0.621592</td>\n",
+ " <td id=\"T_168ea_row6_col90\" class=\"data row6 col90\" >1.009572</td>\n",
+ " <td id=\"T_168ea_row6_col91\" class=\"data row6 col91\" >0.569718</td>\n",
+ " <td id=\"T_168ea_row6_col92\" class=\"data row6 col92\" >0.175291</td>\n",
+ " <td id=\"T_168ea_row6_col93\" class=\"data row6 col93\" >-0.656279</td>\n",
+ " <td id=\"T_168ea_row6_col94\" class=\"data row6 col94\" >-0.112273</td>\n",
+ " <td id=\"T_168ea_row6_col95\" class=\"data row6 col95\" >-0.392137</td>\n",
+ " <td id=\"T_168ea_row6_col96\" class=\"data row6 col96\" >-1.043558</td>\n",
+ " <td id=\"T_168ea_row6_col97\" class=\"data row6 col97\" >-0.467318</td>\n",
+ " <td id=\"T_168ea_row6_col98\" class=\"data row6 col98\" >-0.384329</td>\n",
+ " <td id=\"T_168ea_row6_col99\" class=\"data row6 col99\" >-2.009207</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row7\" class=\"row_heading level2 row7\" >3</th>\n",
+ " <td id=\"T_168ea_row7_col0\" class=\"data row7 col0\" >0.658598</td>\n",
+ " <td id=\"T_168ea_row7_col1\" class=\"data row7 col1\" >0.101830</td>\n",
+ " <td id=\"T_168ea_row7_col2\" class=\"data row7 col2\" >-0.682781</td>\n",
+ " <td id=\"T_168ea_row7_col3\" class=\"data row7 col3\" >0.229349</td>\n",
+ " <td id=\"T_168ea_row7_col4\" class=\"data row7 col4\" >-0.305657</td>\n",
+ " <td id=\"T_168ea_row7_col5\" class=\"data row7 col5\" >0.404877</td>\n",
+ " <td id=\"T_168ea_row7_col6\" class=\"data row7 col6\" >0.252244</td>\n",
+ " <td id=\"T_168ea_row7_col7\" class=\"data row7 col7\" >-0.837784</td>\n",
+ " <td id=\"T_168ea_row7_col8\" class=\"data row7 col8\" >-0.039624</td>\n",
+ " <td id=\"T_168ea_row7_col9\" class=\"data row7 col9\" >0.329457</td>\n",
+ " <td id=\"T_168ea_row7_col10\" class=\"data row7 col10\" >0.751694</td>\n",
+ " <td id=\"T_168ea_row7_col11\" class=\"data row7 col11\" >1.469070</td>\n",
+ " <td id=\"T_168ea_row7_col12\" class=\"data row7 col12\" >-0.157199</td>\n",
+ " <td id=\"T_168ea_row7_col13\" class=\"data row7 col13\" >1.032628</td>\n",
+ " <td id=\"T_168ea_row7_col14\" class=\"data row7 col14\" >-0.584639</td>\n",
+ " <td id=\"T_168ea_row7_col15\" class=\"data row7 col15\" >-0.925544</td>\n",
+ " <td id=\"T_168ea_row7_col16\" class=\"data row7 col16\" >0.342474</td>\n",
+ " <td id=\"T_168ea_row7_col17\" class=\"data row7 col17\" >-0.969363</td>\n",
+ " <td id=\"T_168ea_row7_col18\" class=\"data row7 col18\" >0.133480</td>\n",
+ " <td id=\"T_168ea_row7_col19\" class=\"data row7 col19\" >-0.385974</td>\n",
+ " <td id=\"T_168ea_row7_col20\" class=\"data row7 col20\" >-0.600278</td>\n",
+ " <td id=\"T_168ea_row7_col21\" class=\"data row7 col21\" >0.281939</td>\n",
+ " <td id=\"T_168ea_row7_col22\" class=\"data row7 col22\" >0.868579</td>\n",
+ " <td id=\"T_168ea_row7_col23\" class=\"data row7 col23\" >1.129803</td>\n",
+ " <td id=\"T_168ea_row7_col24\" class=\"data row7 col24\" >-0.041898</td>\n",
+ " <td id=\"T_168ea_row7_col25\" class=\"data row7 col25\" >0.961193</td>\n",
+ " <td id=\"T_168ea_row7_col26\" class=\"data row7 col26\" >0.131521</td>\n",
+ " <td id=\"T_168ea_row7_col27\" class=\"data row7 col27\" >-0.792889</td>\n",
+ " <td id=\"T_168ea_row7_col28\" class=\"data row7 col28\" >-1.285737</td>\n",
+ " <td id=\"T_168ea_row7_col29\" class=\"data row7 col29\" >0.073934</td>\n",
+ " <td id=\"T_168ea_row7_col30\" class=\"data row7 col30\" >-1.333315</td>\n",
+ " <td id=\"T_168ea_row7_col31\" class=\"data row7 col31\" >-1.044125</td>\n",
+ " <td id=\"T_168ea_row7_col32\" class=\"data row7 col32\" >1.277338</td>\n",
+ " <td id=\"T_168ea_row7_col33\" class=\"data row7 col33\" >1.492257</td>\n",
+ " <td id=\"T_168ea_row7_col34\" class=\"data row7 col34\" >0.411379</td>\n",
+ " <td id=\"T_168ea_row7_col35\" class=\"data row7 col35\" >1.771805</td>\n",
+ " <td id=\"T_168ea_row7_col36\" class=\"data row7 col36\" >-1.111128</td>\n",
+ " <td id=\"T_168ea_row7_col37\" class=\"data row7 col37\" >1.123233</td>\n",
+ " <td id=\"T_168ea_row7_col38\" class=\"data row7 col38\" >-1.019449</td>\n",
+ " <td id=\"T_168ea_row7_col39\" class=\"data row7 col39\" >1.738357</td>\n",
+ " <td id=\"T_168ea_row7_col40\" class=\"data row7 col40\" >-0.690764</td>\n",
+ " <td id=\"T_168ea_row7_col41\" class=\"data row7 col41\" >-0.120710</td>\n",
+ " <td id=\"T_168ea_row7_col42\" class=\"data row7 col42\" >-0.421359</td>\n",
+ " <td id=\"T_168ea_row7_col43\" class=\"data row7 col43\" >-0.727294</td>\n",
+ " <td id=\"T_168ea_row7_col44\" class=\"data row7 col44\" >-0.857759</td>\n",
+ " <td id=\"T_168ea_row7_col45\" class=\"data row7 col45\" >-0.069436</td>\n",
+ " <td id=\"T_168ea_row7_col46\" class=\"data row7 col46\" >-0.328334</td>\n",
+ " <td id=\"T_168ea_row7_col47\" class=\"data row7 col47\" >-0.558180</td>\n",
+ " <td id=\"T_168ea_row7_col48\" class=\"data row7 col48\" >1.063474</td>\n",
+ " <td id=\"T_168ea_row7_col49\" class=\"data row7 col49\" >-0.519133</td>\n",
+ " <td id=\"T_168ea_row7_col50\" class=\"data row7 col50\" >-0.496902</td>\n",
+ " <td id=\"T_168ea_row7_col51\" class=\"data row7 col51\" >1.089589</td>\n",
+ " <td id=\"T_168ea_row7_col52\" class=\"data row7 col52\" >-1.615801</td>\n",
+ " <td id=\"T_168ea_row7_col53\" class=\"data row7 col53\" >0.080174</td>\n",
+ " <td id=\"T_168ea_row7_col54\" class=\"data row7 col54\" >-0.229938</td>\n",
+ " <td id=\"T_168ea_row7_col55\" class=\"data row7 col55\" >-0.498420</td>\n",
+ " <td id=\"T_168ea_row7_col56\" class=\"data row7 col56\" >-0.624615</td>\n",
+ " <td id=\"T_168ea_row7_col57\" class=\"data row7 col57\" >0.059481</td>\n",
+ " <td id=\"T_168ea_row7_col58\" class=\"data row7 col58\" >-0.093158</td>\n",
+ " <td id=\"T_168ea_row7_col59\" class=\"data row7 col59\" >-1.784549</td>\n",
+ " <td id=\"T_168ea_row7_col60\" class=\"data row7 col60\" >-0.503789</td>\n",
+ " <td id=\"T_168ea_row7_col61\" class=\"data row7 col61\" >-0.140528</td>\n",
+ " <td id=\"T_168ea_row7_col62\" class=\"data row7 col62\" >0.002653</td>\n",
+ " <td id=\"T_168ea_row7_col63\" class=\"data row7 col63\" >-0.484930</td>\n",
+ " <td id=\"T_168ea_row7_col64\" class=\"data row7 col64\" >0.055914</td>\n",
+ " <td id=\"T_168ea_row7_col65\" class=\"data row7 col65\" >-0.680948</td>\n",
+ " <td id=\"T_168ea_row7_col66\" class=\"data row7 col66\" >-0.994271</td>\n",
+ " <td id=\"T_168ea_row7_col67\" class=\"data row7 col67\" >1.277052</td>\n",
+ " <td id=\"T_168ea_row7_col68\" class=\"data row7 col68\" >0.037651</td>\n",
+ " <td id=\"T_168ea_row7_col69\" class=\"data row7 col69\" >2.155421</td>\n",
+ " <td id=\"T_168ea_row7_col70\" class=\"data row7 col70\" >-0.437589</td>\n",
+ " <td id=\"T_168ea_row7_col71\" class=\"data row7 col71\" >0.696404</td>\n",
+ " <td id=\"T_168ea_row7_col72\" class=\"data row7 col72\" >0.417752</td>\n",
+ " <td id=\"T_168ea_row7_col73\" class=\"data row7 col73\" >-0.544785</td>\n",
+ " <td id=\"T_168ea_row7_col74\" class=\"data row7 col74\" >1.190690</td>\n",
+ " <td id=\"T_168ea_row7_col75\" class=\"data row7 col75\" >0.978262</td>\n",
+ " <td id=\"T_168ea_row7_col76\" class=\"data row7 col76\" >0.752102</td>\n",
+ " <td id=\"T_168ea_row7_col77\" class=\"data row7 col77\" >0.504472</td>\n",
+ " <td id=\"T_168ea_row7_col78\" class=\"data row7 col78\" >0.139853</td>\n",
+ " <td id=\"T_168ea_row7_col79\" class=\"data row7 col79\" >-0.505089</td>\n",
+ " <td id=\"T_168ea_row7_col80\" class=\"data row7 col80\" >-0.264975</td>\n",
+ " <td id=\"T_168ea_row7_col81\" class=\"data row7 col81\" >-1.603194</td>\n",
+ " <td id=\"T_168ea_row7_col82\" class=\"data row7 col82\" >0.731847</td>\n",
+ " <td id=\"T_168ea_row7_col83\" class=\"data row7 col83\" >0.010903</td>\n",
+ " <td id=\"T_168ea_row7_col84\" class=\"data row7 col84\" >-1.165346</td>\n",
+ " <td id=\"T_168ea_row7_col85\" class=\"data row7 col85\" >-0.125195</td>\n",
+ " <td id=\"T_168ea_row7_col86\" class=\"data row7 col86\" >-1.032685</td>\n",
+ " <td id=\"T_168ea_row7_col87\" class=\"data row7 col87\" >-0.465520</td>\n",
+ " <td id=\"T_168ea_row7_col88\" class=\"data row7 col88\" >1.514808</td>\n",
+ " <td id=\"T_168ea_row7_col89\" class=\"data row7 col89\" >0.304762</td>\n",
+ " <td id=\"T_168ea_row7_col90\" class=\"data row7 col90\" >0.793414</td>\n",
+ " <td id=\"T_168ea_row7_col91\" class=\"data row7 col91\" >0.314635</td>\n",
+ " <td id=\"T_168ea_row7_col92\" class=\"data row7 col92\" >-1.638279</td>\n",
+ " <td id=\"T_168ea_row7_col93\" class=\"data row7 col93\" >0.111737</td>\n",
+ " <td id=\"T_168ea_row7_col94\" class=\"data row7 col94\" >-0.777037</td>\n",
+ " <td id=\"T_168ea_row7_col95\" class=\"data row7 col95\" >0.251783</td>\n",
+ " <td id=\"T_168ea_row7_col96\" class=\"data row7 col96\" >1.126303</td>\n",
+ " <td id=\"T_168ea_row7_col97\" class=\"data row7 col97\" >-0.808798</td>\n",
+ " <td id=\"T_168ea_row7_col98\" class=\"data row7 col98\" >0.422064</td>\n",
+ " <td id=\"T_168ea_row7_col99\" class=\"data row7 col99\" >-0.349264</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level0_row8\" class=\"row_heading level0 row8\" rowspan=\"8\">B</th>\n",
+ " <th id=\"T_168ea_level1_row8\" class=\"row_heading level1 row8\" rowspan=\"4\">0</th>\n",
+ " <th id=\"T_168ea_level2_row8\" class=\"row_heading level2 row8\" >0</th>\n",
+ " <td id=\"T_168ea_row8_col0\" class=\"data row8 col0\" >-0.356362</td>\n",
+ " <td id=\"T_168ea_row8_col1\" class=\"data row8 col1\" >-0.089227</td>\n",
+ " <td id=\"T_168ea_row8_col2\" class=\"data row8 col2\" >0.609373</td>\n",
+ " <td id=\"T_168ea_row8_col3\" class=\"data row8 col3\" >0.542382</td>\n",
+ " <td id=\"T_168ea_row8_col4\" class=\"data row8 col4\" >-0.768681</td>\n",
+ " <td id=\"T_168ea_row8_col5\" class=\"data row8 col5\" >-0.048074</td>\n",
+ " <td id=\"T_168ea_row8_col6\" class=\"data row8 col6\" >2.015458</td>\n",
+ " <td id=\"T_168ea_row8_col7\" class=\"data row8 col7\" >-1.552351</td>\n",
+ " <td id=\"T_168ea_row8_col8\" class=\"data row8 col8\" >0.251552</td>\n",
+ " <td id=\"T_168ea_row8_col9\" class=\"data row8 col9\" >1.459635</td>\n",
+ " <td id=\"T_168ea_row8_col10\" class=\"data row8 col10\" >0.949707</td>\n",
+ " <td id=\"T_168ea_row8_col11\" class=\"data row8 col11\" >0.339465</td>\n",
+ " <td id=\"T_168ea_row8_col12\" class=\"data row8 col12\" >-0.001372</td>\n",
+ " <td id=\"T_168ea_row8_col13\" class=\"data row8 col13\" >1.798589</td>\n",
+ " <td id=\"T_168ea_row8_col14\" class=\"data row8 col14\" >1.559163</td>\n",
+ " <td id=\"T_168ea_row8_col15\" class=\"data row8 col15\" >0.231783</td>\n",
+ " <td id=\"T_168ea_row8_col16\" class=\"data row8 col16\" >0.423141</td>\n",
+ " <td id=\"T_168ea_row8_col17\" class=\"data row8 col17\" >-0.310530</td>\n",
+ " <td id=\"T_168ea_row8_col18\" class=\"data row8 col18\" >0.353795</td>\n",
+ " <td id=\"T_168ea_row8_col19\" class=\"data row8 col19\" >2.173336</td>\n",
+ " <td id=\"T_168ea_row8_col20\" class=\"data row8 col20\" >-0.196247</td>\n",
+ " <td id=\"T_168ea_row8_col21\" class=\"data row8 col21\" >-0.375636</td>\n",
+ " <td id=\"T_168ea_row8_col22\" class=\"data row8 col22\" >-0.858221</td>\n",
+ " <td id=\"T_168ea_row8_col23\" class=\"data row8 col23\" >0.258410</td>\n",
+ " <td id=\"T_168ea_row8_col24\" class=\"data row8 col24\" >0.656430</td>\n",
+ " <td id=\"T_168ea_row8_col25\" class=\"data row8 col25\" >0.960819</td>\n",
+ " <td id=\"T_168ea_row8_col26\" class=\"data row8 col26\" >1.137893</td>\n",
+ " <td id=\"T_168ea_row8_col27\" class=\"data row8 col27\" >1.553405</td>\n",
+ " <td id=\"T_168ea_row8_col28\" class=\"data row8 col28\" >0.038981</td>\n",
+ " <td id=\"T_168ea_row8_col29\" class=\"data row8 col29\" >-0.632038</td>\n",
+ " <td id=\"T_168ea_row8_col30\" class=\"data row8 col30\" >-0.132009</td>\n",
+ " <td id=\"T_168ea_row8_col31\" class=\"data row8 col31\" >-1.834997</td>\n",
+ " <td id=\"T_168ea_row8_col32\" class=\"data row8 col32\" >-0.242576</td>\n",
+ " <td id=\"T_168ea_row8_col33\" class=\"data row8 col33\" >-0.297879</td>\n",
+ " <td id=\"T_168ea_row8_col34\" class=\"data row8 col34\" >-0.441559</td>\n",
+ " <td id=\"T_168ea_row8_col35\" class=\"data row8 col35\" >-0.769691</td>\n",
+ " <td id=\"T_168ea_row8_col36\" class=\"data row8 col36\" >0.224077</td>\n",
+ " <td id=\"T_168ea_row8_col37\" class=\"data row8 col37\" >-0.153009</td>\n",
+ " <td id=\"T_168ea_row8_col38\" class=\"data row8 col38\" >0.519526</td>\n",
+ " <td id=\"T_168ea_row8_col39\" class=\"data row8 col39\" >-0.680188</td>\n",
+ " <td id=\"T_168ea_row8_col40\" class=\"data row8 col40\" >0.535851</td>\n",
+ " <td id=\"T_168ea_row8_col41\" class=\"data row8 col41\" >0.671496</td>\n",
+ " <td id=\"T_168ea_row8_col42\" class=\"data row8 col42\" >-0.183064</td>\n",
+ " <td id=\"T_168ea_row8_col43\" class=\"data row8 col43\" >0.301234</td>\n",
+ " <td id=\"T_168ea_row8_col44\" class=\"data row8 col44\" >1.288256</td>\n",
+ " <td id=\"T_168ea_row8_col45\" class=\"data row8 col45\" >-2.478240</td>\n",
+ " <td id=\"T_168ea_row8_col46\" class=\"data row8 col46\" >-0.360403</td>\n",
+ " <td id=\"T_168ea_row8_col47\" class=\"data row8 col47\" >0.424067</td>\n",
+ " <td id=\"T_168ea_row8_col48\" class=\"data row8 col48\" >-0.834659</td>\n",
+ " <td id=\"T_168ea_row8_col49\" class=\"data row8 col49\" >-0.128464</td>\n",
+ " <td id=\"T_168ea_row8_col50\" class=\"data row8 col50\" >-0.489013</td>\n",
+ " <td id=\"T_168ea_row8_col51\" class=\"data row8 col51\" >-0.014888</td>\n",
+ " <td id=\"T_168ea_row8_col52\" class=\"data row8 col52\" >-1.461230</td>\n",
+ " <td id=\"T_168ea_row8_col53\" class=\"data row8 col53\" >-1.435223</td>\n",
+ " <td id=\"T_168ea_row8_col54\" class=\"data row8 col54\" >-1.319802</td>\n",
+ " <td id=\"T_168ea_row8_col55\" class=\"data row8 col55\" >1.083675</td>\n",
+ " <td id=\"T_168ea_row8_col56\" class=\"data row8 col56\" >0.979140</td>\n",
+ " <td id=\"T_168ea_row8_col57\" class=\"data row8 col57\" >-0.375291</td>\n",
+ " <td id=\"T_168ea_row8_col58\" class=\"data row8 col58\" >1.110189</td>\n",
+ " <td id=\"T_168ea_row8_col59\" class=\"data row8 col59\" >-1.011351</td>\n",
+ " <td id=\"T_168ea_row8_col60\" class=\"data row8 col60\" >0.587886</td>\n",
+ " <td id=\"T_168ea_row8_col61\" class=\"data row8 col61\" >-0.822775</td>\n",
+ " <td id=\"T_168ea_row8_col62\" class=\"data row8 col62\" >-1.183865</td>\n",
+ " <td id=\"T_168ea_row8_col63\" class=\"data row8 col63\" >1.455173</td>\n",
+ " <td id=\"T_168ea_row8_col64\" class=\"data row8 col64\" >1.134328</td>\n",
+ " <td id=\"T_168ea_row8_col65\" class=\"data row8 col65\" >0.239403</td>\n",
+ " <td id=\"T_168ea_row8_col66\" class=\"data row8 col66\" >-0.837991</td>\n",
+ " <td id=\"T_168ea_row8_col67\" class=\"data row8 col67\" >-1.130932</td>\n",
+ " <td id=\"T_168ea_row8_col68\" class=\"data row8 col68\" >0.783168</td>\n",
+ " <td id=\"T_168ea_row8_col69\" class=\"data row8 col69\" >1.845520</td>\n",
+ " <td id=\"T_168ea_row8_col70\" class=\"data row8 col70\" >1.437072</td>\n",
+ " <td id=\"T_168ea_row8_col71\" class=\"data row8 col71\" >-1.198443</td>\n",
+ " <td id=\"T_168ea_row8_col72\" class=\"data row8 col72\" >1.379098</td>\n",
+ " <td id=\"T_168ea_row8_col73\" class=\"data row8 col73\" >2.129113</td>\n",
+ " <td id=\"T_168ea_row8_col74\" class=\"data row8 col74\" >0.260096</td>\n",
+ " <td id=\"T_168ea_row8_col75\" class=\"data row8 col75\" >-0.011975</td>\n",
+ " <td id=\"T_168ea_row8_col76\" class=\"data row8 col76\" >0.043302</td>\n",
+ " <td id=\"T_168ea_row8_col77\" class=\"data row8 col77\" >0.722941</td>\n",
+ " <td id=\"T_168ea_row8_col78\" class=\"data row8 col78\" >1.028152</td>\n",
+ " <td id=\"T_168ea_row8_col79\" class=\"data row8 col79\" >-0.235806</td>\n",
+ " <td id=\"T_168ea_row8_col80\" class=\"data row8 col80\" >1.145245</td>\n",
+ " <td id=\"T_168ea_row8_col81\" class=\"data row8 col81\" >-1.359598</td>\n",
+ " <td id=\"T_168ea_row8_col82\" class=\"data row8 col82\" >0.232189</td>\n",
+ " <td id=\"T_168ea_row8_col83\" class=\"data row8 col83\" >0.503712</td>\n",
+ " <td id=\"T_168ea_row8_col84\" class=\"data row8 col84\" >-0.614264</td>\n",
+ " <td id=\"T_168ea_row8_col85\" class=\"data row8 col85\" >-0.530606</td>\n",
+ " <td id=\"T_168ea_row8_col86\" class=\"data row8 col86\" >-2.435803</td>\n",
+ " <td id=\"T_168ea_row8_col87\" class=\"data row8 col87\" >-0.255238</td>\n",
+ " <td id=\"T_168ea_row8_col88\" class=\"data row8 col88\" >-0.064423</td>\n",
+ " <td id=\"T_168ea_row8_col89\" class=\"data row8 col89\" >0.784643</td>\n",
+ " <td id=\"T_168ea_row8_col90\" class=\"data row8 col90\" >0.256346</td>\n",
+ " <td id=\"T_168ea_row8_col91\" class=\"data row8 col91\" >0.128023</td>\n",
+ " <td id=\"T_168ea_row8_col92\" class=\"data row8 col92\" >1.414103</td>\n",
+ " <td id=\"T_168ea_row8_col93\" class=\"data row8 col93\" >-1.118659</td>\n",
+ " <td id=\"T_168ea_row8_col94\" class=\"data row8 col94\" >0.877353</td>\n",
+ " <td id=\"T_168ea_row8_col95\" class=\"data row8 col95\" >0.500561</td>\n",
+ " <td id=\"T_168ea_row8_col96\" class=\"data row8 col96\" >0.463651</td>\n",
+ " <td id=\"T_168ea_row8_col97\" class=\"data row8 col97\" >-2.034512</td>\n",
+ " <td id=\"T_168ea_row8_col98\" class=\"data row8 col98\" >-0.981683</td>\n",
+ " <td id=\"T_168ea_row8_col99\" class=\"data row8 col99\" >-0.691944</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row9\" class=\"row_heading level2 row9\" >1</th>\n",
+ " <td id=\"T_168ea_row9_col0\" class=\"data row9 col0\" >-1.113376</td>\n",
+ " <td id=\"T_168ea_row9_col1\" class=\"data row9 col1\" >-1.169402</td>\n",
+ " <td id=\"T_168ea_row9_col2\" class=\"data row9 col2\" >0.680539</td>\n",
+ " <td id=\"T_168ea_row9_col3\" class=\"data row9 col3\" >-1.534212</td>\n",
+ " <td id=\"T_168ea_row9_col4\" class=\"data row9 col4\" >1.653817</td>\n",
+ " <td id=\"T_168ea_row9_col5\" class=\"data row9 col5\" >-1.295181</td>\n",
+ " <td id=\"T_168ea_row9_col6\" class=\"data row9 col6\" >-0.566826</td>\n",
+ " <td id=\"T_168ea_row9_col7\" class=\"data row9 col7\" >0.477014</td>\n",
+ " <td id=\"T_168ea_row9_col8\" class=\"data row9 col8\" >1.413371</td>\n",
+ " <td id=\"T_168ea_row9_col9\" class=\"data row9 col9\" >0.517105</td>\n",
+ " <td id=\"T_168ea_row9_col10\" class=\"data row9 col10\" >1.401153</td>\n",
+ " <td id=\"T_168ea_row9_col11\" class=\"data row9 col11\" >-0.872685</td>\n",
+ " <td id=\"T_168ea_row9_col12\" class=\"data row9 col12\" >0.830957</td>\n",
+ " <td id=\"T_168ea_row9_col13\" class=\"data row9 col13\" >0.181507</td>\n",
+ " <td id=\"T_168ea_row9_col14\" class=\"data row9 col14\" >-0.145616</td>\n",
+ " <td id=\"T_168ea_row9_col15\" class=\"data row9 col15\" >0.694592</td>\n",
+ " <td id=\"T_168ea_row9_col16\" class=\"data row9 col16\" >-0.751208</td>\n",
+ " <td id=\"T_168ea_row9_col17\" class=\"data row9 col17\" >0.324444</td>\n",
+ " <td id=\"T_168ea_row9_col18\" class=\"data row9 col18\" >0.681973</td>\n",
+ " <td id=\"T_168ea_row9_col19\" class=\"data row9 col19\" >-0.054972</td>\n",
+ " <td id=\"T_168ea_row9_col20\" class=\"data row9 col20\" >0.917776</td>\n",
+ " <td id=\"T_168ea_row9_col21\" class=\"data row9 col21\" >-1.024810</td>\n",
+ " <td id=\"T_168ea_row9_col22\" class=\"data row9 col22\" >-0.206446</td>\n",
+ " <td id=\"T_168ea_row9_col23\" class=\"data row9 col23\" >-0.600113</td>\n",
+ " <td id=\"T_168ea_row9_col24\" class=\"data row9 col24\" >0.852805</td>\n",
+ " <td id=\"T_168ea_row9_col25\" class=\"data row9 col25\" >1.455109</td>\n",
+ " <td id=\"T_168ea_row9_col26\" class=\"data row9 col26\" >-0.079769</td>\n",
+ " <td id=\"T_168ea_row9_col27\" class=\"data row9 col27\" >0.076076</td>\n",
+ " <td id=\"T_168ea_row9_col28\" class=\"data row9 col28\" >0.207699</td>\n",
+ " <td id=\"T_168ea_row9_col29\" class=\"data row9 col29\" >-1.850458</td>\n",
+ " <td id=\"T_168ea_row9_col30\" class=\"data row9 col30\" >-0.124124</td>\n",
+ " <td id=\"T_168ea_row9_col31\" class=\"data row9 col31\" >-0.610871</td>\n",
+ " <td id=\"T_168ea_row9_col32\" class=\"data row9 col32\" >-0.883362</td>\n",
+ " <td id=\"T_168ea_row9_col33\" class=\"data row9 col33\" >0.219049</td>\n",
+ " <td id=\"T_168ea_row9_col34\" class=\"data row9 col34\" >-0.685094</td>\n",
+ " <td id=\"T_168ea_row9_col35\" class=\"data row9 col35\" >-0.645330</td>\n",
+ " <td id=\"T_168ea_row9_col36\" class=\"data row9 col36\" >-0.242805</td>\n",
+ " <td id=\"T_168ea_row9_col37\" class=\"data row9 col37\" >-0.775602</td>\n",
+ " <td id=\"T_168ea_row9_col38\" class=\"data row9 col38\" >0.233070</td>\n",
+ " <td id=\"T_168ea_row9_col39\" class=\"data row9 col39\" >2.422642</td>\n",
+ " <td id=\"T_168ea_row9_col40\" class=\"data row9 col40\" >-1.423040</td>\n",
+ " <td id=\"T_168ea_row9_col41\" class=\"data row9 col41\" >-0.582421</td>\n",
+ " <td id=\"T_168ea_row9_col42\" class=\"data row9 col42\" >0.968304</td>\n",
+ " <td id=\"T_168ea_row9_col43\" class=\"data row9 col43\" >-0.701025</td>\n",
+ " <td id=\"T_168ea_row9_col44\" class=\"data row9 col44\" >-0.167850</td>\n",
+ " <td id=\"T_168ea_row9_col45\" class=\"data row9 col45\" >0.277264</td>\n",
+ " <td id=\"T_168ea_row9_col46\" class=\"data row9 col46\" >1.301231</td>\n",
+ " <td id=\"T_168ea_row9_col47\" class=\"data row9 col47\" >0.301205</td>\n",
+ " <td id=\"T_168ea_row9_col48\" class=\"data row9 col48\" >-3.081249</td>\n",
+ " <td id=\"T_168ea_row9_col49\" class=\"data row9 col49\" >-0.562868</td>\n",
+ " <td id=\"T_168ea_row9_col50\" class=\"data row9 col50\" >0.192944</td>\n",
+ " <td id=\"T_168ea_row9_col51\" class=\"data row9 col51\" >-0.664592</td>\n",
+ " <td id=\"T_168ea_row9_col52\" class=\"data row9 col52\" >0.565686</td>\n",
+ " <td id=\"T_168ea_row9_col53\" class=\"data row9 col53\" >0.190913</td>\n",
+ " <td id=\"T_168ea_row9_col54\" class=\"data row9 col54\" >-0.841858</td>\n",
+ " <td id=\"T_168ea_row9_col55\" class=\"data row9 col55\" >-1.856545</td>\n",
+ " <td id=\"T_168ea_row9_col56\" class=\"data row9 col56\" >-1.022777</td>\n",
+ " <td id=\"T_168ea_row9_col57\" class=\"data row9 col57\" >1.295968</td>\n",
+ " <td id=\"T_168ea_row9_col58\" class=\"data row9 col58\" >0.451921</td>\n",
+ " <td id=\"T_168ea_row9_col59\" class=\"data row9 col59\" >0.659955</td>\n",
+ " <td id=\"T_168ea_row9_col60\" class=\"data row9 col60\" >0.065818</td>\n",
+ " <td id=\"T_168ea_row9_col61\" class=\"data row9 col61\" >-0.319586</td>\n",
+ " <td id=\"T_168ea_row9_col62\" class=\"data row9 col62\" >0.253495</td>\n",
+ " <td id=\"T_168ea_row9_col63\" class=\"data row9 col63\" >-1.144646</td>\n",
+ " <td id=\"T_168ea_row9_col64\" class=\"data row9 col64\" >-0.483404</td>\n",
+ " <td id=\"T_168ea_row9_col65\" class=\"data row9 col65\" >0.555902</td>\n",
+ " <td id=\"T_168ea_row9_col66\" class=\"data row9 col66\" >0.807069</td>\n",
+ " <td id=\"T_168ea_row9_col67\" class=\"data row9 col67\" >0.714196</td>\n",
+ " <td id=\"T_168ea_row9_col68\" class=\"data row9 col68\" >0.661196</td>\n",
+ " <td id=\"T_168ea_row9_col69\" class=\"data row9 col69\" >0.053667</td>\n",
+ " <td id=\"T_168ea_row9_col70\" class=\"data row9 col70\" >0.346833</td>\n",
+ " <td id=\"T_168ea_row9_col71\" class=\"data row9 col71\" >-1.288977</td>\n",
+ " <td id=\"T_168ea_row9_col72\" class=\"data row9 col72\" >-0.386734</td>\n",
+ " <td id=\"T_168ea_row9_col73\" class=\"data row9 col73\" >-1.262127</td>\n",
+ " <td id=\"T_168ea_row9_col74\" class=\"data row9 col74\" >0.477495</td>\n",
+ " <td id=\"T_168ea_row9_col75\" class=\"data row9 col75\" >-0.494034</td>\n",
+ " <td id=\"T_168ea_row9_col76\" class=\"data row9 col76\" >-0.911414</td>\n",
+ " <td id=\"T_168ea_row9_col77\" class=\"data row9 col77\" >1.152963</td>\n",
+ " <td id=\"T_168ea_row9_col78\" class=\"data row9 col78\" >-0.342365</td>\n",
+ " <td id=\"T_168ea_row9_col79\" class=\"data row9 col79\" >-0.160187</td>\n",
+ " <td id=\"T_168ea_row9_col80\" class=\"data row9 col80\" >0.470054</td>\n",
+ " <td id=\"T_168ea_row9_col81\" class=\"data row9 col81\" >-0.853063</td>\n",
+ " <td id=\"T_168ea_row9_col82\" class=\"data row9 col82\" >-1.387949</td>\n",
+ " <td id=\"T_168ea_row9_col83\" class=\"data row9 col83\" >-0.257257</td>\n",
+ " <td id=\"T_168ea_row9_col84\" class=\"data row9 col84\" >-1.030690</td>\n",
+ " <td id=\"T_168ea_row9_col85\" class=\"data row9 col85\" >-0.110210</td>\n",
+ " <td id=\"T_168ea_row9_col86\" class=\"data row9 col86\" >0.328911</td>\n",
+ " <td id=\"T_168ea_row9_col87\" class=\"data row9 col87\" >-0.555923</td>\n",
+ " <td id=\"T_168ea_row9_col88\" class=\"data row9 col88\" >0.987713</td>\n",
+ " <td id=\"T_168ea_row9_col89\" class=\"data row9 col89\" >-0.501957</td>\n",
+ " <td id=\"T_168ea_row9_col90\" class=\"data row9 col90\" >2.069887</td>\n",
+ " <td id=\"T_168ea_row9_col91\" class=\"data row9 col91\" >-0.067503</td>\n",
+ " <td id=\"T_168ea_row9_col92\" class=\"data row9 col92\" >0.316029</td>\n",
+ " <td id=\"T_168ea_row9_col93\" class=\"data row9 col93\" >-1.506232</td>\n",
+ " <td id=\"T_168ea_row9_col94\" class=\"data row9 col94\" >2.201621</td>\n",
+ " <td id=\"T_168ea_row9_col95\" class=\"data row9 col95\" >0.492097</td>\n",
+ " <td id=\"T_168ea_row9_col96\" class=\"data row9 col96\" >-0.085193</td>\n",
+ " <td id=\"T_168ea_row9_col97\" class=\"data row9 col97\" >-0.977822</td>\n",
+ " <td id=\"T_168ea_row9_col98\" class=\"data row9 col98\" >1.039147</td>\n",
+ " <td id=\"T_168ea_row9_col99\" class=\"data row9 col99\" >-0.653932</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row10\" class=\"row_heading level2 row10\" >2</th>\n",
+ " <td id=\"T_168ea_row10_col0\" class=\"data row10 col0\" >-0.405638</td>\n",
+ " <td id=\"T_168ea_row10_col1\" class=\"data row10 col1\" >-1.402027</td>\n",
+ " <td id=\"T_168ea_row10_col2\" class=\"data row10 col2\" >-1.166242</td>\n",
+ " <td id=\"T_168ea_row10_col3\" class=\"data row10 col3\" >1.306184</td>\n",
+ " <td id=\"T_168ea_row10_col4\" class=\"data row10 col4\" >0.856283</td>\n",
+ " <td id=\"T_168ea_row10_col5\" class=\"data row10 col5\" >-1.236170</td>\n",
+ " <td id=\"T_168ea_row10_col6\" class=\"data row10 col6\" >-0.646721</td>\n",
+ " <td id=\"T_168ea_row10_col7\" class=\"data row10 col7\" >-1.474064</td>\n",
+ " <td id=\"T_168ea_row10_col8\" class=\"data row10 col8\" >0.082960</td>\n",
+ " <td id=\"T_168ea_row10_col9\" class=\"data row10 col9\" >0.090310</td>\n",
+ " <td id=\"T_168ea_row10_col10\" class=\"data row10 col10\" >-0.169977</td>\n",
+ " <td id=\"T_168ea_row10_col11\" class=\"data row10 col11\" >0.406345</td>\n",
+ " <td id=\"T_168ea_row10_col12\" class=\"data row10 col12\" >0.915427</td>\n",
+ " <td id=\"T_168ea_row10_col13\" class=\"data row10 col13\" >-0.974503</td>\n",
+ " <td id=\"T_168ea_row10_col14\" class=\"data row10 col14\" >0.271637</td>\n",
+ " <td id=\"T_168ea_row10_col15\" class=\"data row10 col15\" >1.539184</td>\n",
+ " <td id=\"T_168ea_row10_col16\" class=\"data row10 col16\" >-0.098866</td>\n",
+ " <td id=\"T_168ea_row10_col17\" class=\"data row10 col17\" >-0.525149</td>\n",
+ " <td id=\"T_168ea_row10_col18\" class=\"data row10 col18\" >1.063933</td>\n",
+ " <td id=\"T_168ea_row10_col19\" class=\"data row10 col19\" >0.085827</td>\n",
+ " <td id=\"T_168ea_row10_col20\" class=\"data row10 col20\" >-0.129622</td>\n",
+ " <td id=\"T_168ea_row10_col21\" class=\"data row10 col21\" >0.947959</td>\n",
+ " <td id=\"T_168ea_row10_col22\" class=\"data row10 col22\" >-0.072496</td>\n",
+ " <td id=\"T_168ea_row10_col23\" class=\"data row10 col23\" >-0.237592</td>\n",
+ " <td id=\"T_168ea_row10_col24\" class=\"data row10 col24\" >0.012549</td>\n",
+ " <td id=\"T_168ea_row10_col25\" class=\"data row10 col25\" >1.065761</td>\n",
+ " <td id=\"T_168ea_row10_col26\" class=\"data row10 col26\" >0.996596</td>\n",
+ " <td id=\"T_168ea_row10_col27\" class=\"data row10 col27\" >-0.172481</td>\n",
+ " <td id=\"T_168ea_row10_col28\" class=\"data row10 col28\" >2.583139</td>\n",
+ " <td id=\"T_168ea_row10_col29\" class=\"data row10 col29\" >-0.028578</td>\n",
+ " <td id=\"T_168ea_row10_col30\" class=\"data row10 col30\" >-0.254856</td>\n",
+ " <td id=\"T_168ea_row10_col31\" class=\"data row10 col31\" >1.328794</td>\n",
+ " <td id=\"T_168ea_row10_col32\" class=\"data row10 col32\" >-1.592951</td>\n",
+ " <td id=\"T_168ea_row10_col33\" class=\"data row10 col33\" >2.434350</td>\n",
+ " <td id=\"T_168ea_row10_col34\" class=\"data row10 col34\" >-0.341500</td>\n",
+ " <td id=\"T_168ea_row10_col35\" class=\"data row10 col35\" >-0.307719</td>\n",
+ " <td id=\"T_168ea_row10_col36\" class=\"data row10 col36\" >-1.333273</td>\n",
+ " <td id=\"T_168ea_row10_col37\" class=\"data row10 col37\" >-1.100845</td>\n",
+ " <td id=\"T_168ea_row10_col38\" class=\"data row10 col38\" >0.209097</td>\n",
+ " <td id=\"T_168ea_row10_col39\" class=\"data row10 col39\" >1.734777</td>\n",
+ " <td id=\"T_168ea_row10_col40\" class=\"data row10 col40\" >0.639632</td>\n",
+ " <td id=\"T_168ea_row10_col41\" class=\"data row10 col41\" >0.424779</td>\n",
+ " <td id=\"T_168ea_row10_col42\" class=\"data row10 col42\" >-0.129327</td>\n",
+ " <td id=\"T_168ea_row10_col43\" class=\"data row10 col43\" >0.905029</td>\n",
+ " <td id=\"T_168ea_row10_col44\" class=\"data row10 col44\" >-0.482909</td>\n",
+ " <td id=\"T_168ea_row10_col45\" class=\"data row10 col45\" >1.731628</td>\n",
+ " <td id=\"T_168ea_row10_col46\" class=\"data row10 col46\" >-2.783425</td>\n",
+ " <td id=\"T_168ea_row10_col47\" class=\"data row10 col47\" >-0.333677</td>\n",
+ " <td id=\"T_168ea_row10_col48\" class=\"data row10 col48\" >-0.110895</td>\n",
+ " <td id=\"T_168ea_row10_col49\" class=\"data row10 col49\" >1.212636</td>\n",
+ " <td id=\"T_168ea_row10_col50\" class=\"data row10 col50\" >-0.208412</td>\n",
+ " <td id=\"T_168ea_row10_col51\" class=\"data row10 col51\" >0.427117</td>\n",
+ " <td id=\"T_168ea_row10_col52\" class=\"data row10 col52\" >1.348563</td>\n",
+ " <td id=\"T_168ea_row10_col53\" class=\"data row10 col53\" >0.043859</td>\n",
+ " <td id=\"T_168ea_row10_col54\" class=\"data row10 col54\" >1.772519</td>\n",
+ " <td id=\"T_168ea_row10_col55\" class=\"data row10 col55\" >-1.416106</td>\n",
+ " <td id=\"T_168ea_row10_col56\" class=\"data row10 col56\" >0.401155</td>\n",
+ " <td id=\"T_168ea_row10_col57\" class=\"data row10 col57\" >0.807157</td>\n",
+ " <td id=\"T_168ea_row10_col58\" class=\"data row10 col58\" >0.303427</td>\n",
+ " <td id=\"T_168ea_row10_col59\" class=\"data row10 col59\" >-1.246288</td>\n",
+ " <td id=\"T_168ea_row10_col60\" class=\"data row10 col60\" >0.178774</td>\n",
+ " <td id=\"T_168ea_row10_col61\" class=\"data row10 col61\" >-0.066126</td>\n",
+ " <td id=\"T_168ea_row10_col62\" class=\"data row10 col62\" >-1.862288</td>\n",
+ " <td id=\"T_168ea_row10_col63\" class=\"data row10 col63\" >1.241295</td>\n",
+ " <td id=\"T_168ea_row10_col64\" class=\"data row10 col64\" >0.377021</td>\n",
+ " <td id=\"T_168ea_row10_col65\" class=\"data row10 col65\" >-0.822320</td>\n",
+ " <td id=\"T_168ea_row10_col66\" class=\"data row10 col66\" >-0.749014</td>\n",
+ " <td id=\"T_168ea_row10_col67\" class=\"data row10 col67\" >1.463652</td>\n",
+ " <td id=\"T_168ea_row10_col68\" class=\"data row10 col68\" >1.602268</td>\n",
+ " <td id=\"T_168ea_row10_col69\" class=\"data row10 col69\" >-1.043877</td>\n",
+ " <td id=\"T_168ea_row10_col70\" class=\"data row10 col70\" >1.185290</td>\n",
+ " <td id=\"T_168ea_row10_col71\" class=\"data row10 col71\" >-0.565783</td>\n",
+ " <td id=\"T_168ea_row10_col72\" class=\"data row10 col72\" >-1.076879</td>\n",
+ " <td id=\"T_168ea_row10_col73\" class=\"data row10 col73\" >1.360241</td>\n",
+ " <td id=\"T_168ea_row10_col74\" class=\"data row10 col74\" >-0.121991</td>\n",
+ " <td id=\"T_168ea_row10_col75\" class=\"data row10 col75\" >0.991043</td>\n",
+ " <td id=\"T_168ea_row10_col76\" class=\"data row10 col76\" >1.007952</td>\n",
+ " <td id=\"T_168ea_row10_col77\" class=\"data row10 col77\" >0.450185</td>\n",
+ " <td id=\"T_168ea_row10_col78\" class=\"data row10 col78\" >-0.744376</td>\n",
+ " <td id=\"T_168ea_row10_col79\" class=\"data row10 col79\" >1.388876</td>\n",
+ " <td id=\"T_168ea_row10_col80\" class=\"data row10 col80\" >-0.316847</td>\n",
+ " <td id=\"T_168ea_row10_col81\" class=\"data row10 col81\" >-0.841655</td>\n",
+ " <td id=\"T_168ea_row10_col82\" class=\"data row10 col82\" >-1.056842</td>\n",
+ " <td id=\"T_168ea_row10_col83\" class=\"data row10 col83\" >-0.500226</td>\n",
+ " <td id=\"T_168ea_row10_col84\" class=\"data row10 col84\" >0.096959</td>\n",
+ " <td id=\"T_168ea_row10_col85\" class=\"data row10 col85\" >1.176896</td>\n",
+ " <td id=\"T_168ea_row10_col86\" class=\"data row10 col86\" >-2.939652</td>\n",
+ " <td id=\"T_168ea_row10_col87\" class=\"data row10 col87\" >1.792213</td>\n",
+ " <td id=\"T_168ea_row10_col88\" class=\"data row10 col88\" >0.316340</td>\n",
+ " <td id=\"T_168ea_row10_col89\" class=\"data row10 col89\" >0.303218</td>\n",
+ " <td id=\"T_168ea_row10_col90\" class=\"data row10 col90\" >1.024967</td>\n",
+ " <td id=\"T_168ea_row10_col91\" class=\"data row10 col91\" >-0.590871</td>\n",
+ " <td id=\"T_168ea_row10_col92\" class=\"data row10 col92\" >-0.453326</td>\n",
+ " <td id=\"T_168ea_row10_col93\" class=\"data row10 col93\" >-0.795981</td>\n",
+ " <td id=\"T_168ea_row10_col94\" class=\"data row10 col94\" >-0.393301</td>\n",
+ " <td id=\"T_168ea_row10_col95\" class=\"data row10 col95\" >-0.374372</td>\n",
+ " <td id=\"T_168ea_row10_col96\" class=\"data row10 col96\" >-1.270199</td>\n",
+ " <td id=\"T_168ea_row10_col97\" class=\"data row10 col97\" >1.618372</td>\n",
+ " <td id=\"T_168ea_row10_col98\" class=\"data row10 col98\" >1.197727</td>\n",
+ " <td id=\"T_168ea_row10_col99\" class=\"data row10 col99\" >-0.914863</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row11\" class=\"row_heading level2 row11\" >3</th>\n",
+ " <td id=\"T_168ea_row11_col0\" class=\"data row11 col0\" >-0.625210</td>\n",
+ " <td id=\"T_168ea_row11_col1\" class=\"data row11 col1\" >0.288911</td>\n",
+ " <td id=\"T_168ea_row11_col2\" class=\"data row11 col2\" >0.288374</td>\n",
+ " <td id=\"T_168ea_row11_col3\" class=\"data row11 col3\" >-1.372667</td>\n",
+ " <td id=\"T_168ea_row11_col4\" class=\"data row11 col4\" >-0.591395</td>\n",
+ " <td id=\"T_168ea_row11_col5\" class=\"data row11 col5\" >-0.478942</td>\n",
+ " <td id=\"T_168ea_row11_col6\" class=\"data row11 col6\" >1.335664</td>\n",
+ " <td id=\"T_168ea_row11_col7\" class=\"data row11 col7\" >-0.459855</td>\n",
+ " <td id=\"T_168ea_row11_col8\" class=\"data row11 col8\" >-1.615975</td>\n",
+ " <td id=\"T_168ea_row11_col9\" class=\"data row11 col9\" >-1.189676</td>\n",
+ " <td id=\"T_168ea_row11_col10\" class=\"data row11 col10\" >0.374767</td>\n",
+ " <td id=\"T_168ea_row11_col11\" class=\"data row11 col11\" >-2.488733</td>\n",
+ " <td id=\"T_168ea_row11_col12\" class=\"data row11 col12\" >0.586656</td>\n",
+ " <td id=\"T_168ea_row11_col13\" class=\"data row11 col13\" >-1.422008</td>\n",
+ " <td id=\"T_168ea_row11_col14\" class=\"data row11 col14\" >0.496030</td>\n",
+ " <td id=\"T_168ea_row11_col15\" class=\"data row11 col15\" >1.911128</td>\n",
+ " <td id=\"T_168ea_row11_col16\" class=\"data row11 col16\" >-0.560660</td>\n",
+ " <td id=\"T_168ea_row11_col17\" class=\"data row11 col17\" >-0.499614</td>\n",
+ " <td id=\"T_168ea_row11_col18\" class=\"data row11 col18\" >-0.372171</td>\n",
+ " <td id=\"T_168ea_row11_col19\" class=\"data row11 col19\" >-1.833069</td>\n",
+ " <td id=\"T_168ea_row11_col20\" class=\"data row11 col20\" >0.237124</td>\n",
+ " <td id=\"T_168ea_row11_col21\" class=\"data row11 col21\" >-0.944446</td>\n",
+ " <td id=\"T_168ea_row11_col22\" class=\"data row11 col22\" >0.912140</td>\n",
+ " <td id=\"T_168ea_row11_col23\" class=\"data row11 col23\" >0.359790</td>\n",
+ " <td id=\"T_168ea_row11_col24\" class=\"data row11 col24\" >-1.359235</td>\n",
+ " <td id=\"T_168ea_row11_col25\" class=\"data row11 col25\" >0.166966</td>\n",
+ " <td id=\"T_168ea_row11_col26\" class=\"data row11 col26\" >-0.047107</td>\n",
+ " <td id=\"T_168ea_row11_col27\" class=\"data row11 col27\" >-0.279789</td>\n",
+ " <td id=\"T_168ea_row11_col28\" class=\"data row11 col28\" >-0.594454</td>\n",
+ " <td id=\"T_168ea_row11_col29\" class=\"data row11 col29\" >-0.739013</td>\n",
+ " <td id=\"T_168ea_row11_col30\" class=\"data row11 col30\" >-1.527645</td>\n",
+ " <td id=\"T_168ea_row11_col31\" class=\"data row11 col31\" >0.401668</td>\n",
+ " <td id=\"T_168ea_row11_col32\" class=\"data row11 col32\" >1.791252</td>\n",
+ " <td id=\"T_168ea_row11_col33\" class=\"data row11 col33\" >-2.774848</td>\n",
+ " <td id=\"T_168ea_row11_col34\" class=\"data row11 col34\" >0.523873</td>\n",
+ " <td id=\"T_168ea_row11_col35\" class=\"data row11 col35\" >2.207585</td>\n",
+ " <td id=\"T_168ea_row11_col36\" class=\"data row11 col36\" >0.488999</td>\n",
+ " <td id=\"T_168ea_row11_col37\" class=\"data row11 col37\" >-0.339283</td>\n",
+ " <td id=\"T_168ea_row11_col38\" class=\"data row11 col38\" >0.131711</td>\n",
+ " <td id=\"T_168ea_row11_col39\" class=\"data row11 col39\" >0.018409</td>\n",
+ " <td id=\"T_168ea_row11_col40\" class=\"data row11 col40\" >1.186551</td>\n",
+ " <td id=\"T_168ea_row11_col41\" class=\"data row11 col41\" >-0.424318</td>\n",
+ " <td id=\"T_168ea_row11_col42\" class=\"data row11 col42\" >1.554994</td>\n",
+ " <td id=\"T_168ea_row11_col43\" class=\"data row11 col43\" >-0.205917</td>\n",
+ " <td id=\"T_168ea_row11_col44\" class=\"data row11 col44\" >-0.934975</td>\n",
+ " <td id=\"T_168ea_row11_col45\" class=\"data row11 col45\" >0.654102</td>\n",
+ " <td id=\"T_168ea_row11_col46\" class=\"data row11 col46\" >-1.227761</td>\n",
+ " <td id=\"T_168ea_row11_col47\" class=\"data row11 col47\" >-0.461025</td>\n",
+ " <td id=\"T_168ea_row11_col48\" class=\"data row11 col48\" >-0.421201</td>\n",
+ " <td id=\"T_168ea_row11_col49\" class=\"data row11 col49\" >-0.058615</td>\n",
+ " <td id=\"T_168ea_row11_col50\" class=\"data row11 col50\" >-0.584563</td>\n",
+ " <td id=\"T_168ea_row11_col51\" class=\"data row11 col51\" >0.336913</td>\n",
+ " <td id=\"T_168ea_row11_col52\" class=\"data row11 col52\" >-0.477102</td>\n",
+ " <td id=\"T_168ea_row11_col53\" class=\"data row11 col53\" >-1.381463</td>\n",
+ " <td id=\"T_168ea_row11_col54\" class=\"data row11 col54\" >0.757745</td>\n",
+ " <td id=\"T_168ea_row11_col55\" class=\"data row11 col55\" >-0.268968</td>\n",
+ " <td id=\"T_168ea_row11_col56\" class=\"data row11 col56\" >0.034870</td>\n",
+ " <td id=\"T_168ea_row11_col57\" class=\"data row11 col57\" >1.231686</td>\n",
+ " <td id=\"T_168ea_row11_col58\" class=\"data row11 col58\" >0.236600</td>\n",
+ " <td id=\"T_168ea_row11_col59\" class=\"data row11 col59\" >1.234720</td>\n",
+ " <td id=\"T_168ea_row11_col60\" class=\"data row11 col60\" >-0.040247</td>\n",
+ " <td id=\"T_168ea_row11_col61\" class=\"data row11 col61\" >0.029582</td>\n",
+ " <td id=\"T_168ea_row11_col62\" class=\"data row11 col62\" >1.034905</td>\n",
+ " <td id=\"T_168ea_row11_col63\" class=\"data row11 col63\" >0.380204</td>\n",
+ " <td id=\"T_168ea_row11_col64\" class=\"data row11 col64\" >-0.012108</td>\n",
+ " <td id=\"T_168ea_row11_col65\" class=\"data row11 col65\" >-0.859511</td>\n",
+ " <td id=\"T_168ea_row11_col66\" class=\"data row11 col66\" >-0.990340</td>\n",
+ " <td id=\"T_168ea_row11_col67\" class=\"data row11 col67\" >-1.205172</td>\n",
+ " <td id=\"T_168ea_row11_col68\" class=\"data row11 col68\" >-1.030178</td>\n",
+ " <td id=\"T_168ea_row11_col69\" class=\"data row11 col69\" >0.426676</td>\n",
+ " <td id=\"T_168ea_row11_col70\" class=\"data row11 col70\" >0.497796</td>\n",
+ " <td id=\"T_168ea_row11_col71\" class=\"data row11 col71\" >-0.876808</td>\n",
+ " <td id=\"T_168ea_row11_col72\" class=\"data row11 col72\" >0.957963</td>\n",
+ " <td id=\"T_168ea_row11_col73\" class=\"data row11 col73\" >0.173016</td>\n",
+ " <td id=\"T_168ea_row11_col74\" class=\"data row11 col74\" >0.131612</td>\n",
+ " <td id=\"T_168ea_row11_col75\" class=\"data row11 col75\" >-1.003556</td>\n",
+ " <td id=\"T_168ea_row11_col76\" class=\"data row11 col76\" >-1.069908</td>\n",
+ " <td id=\"T_168ea_row11_col77\" class=\"data row11 col77\" >-1.799207</td>\n",
+ " <td id=\"T_168ea_row11_col78\" class=\"data row11 col78\" >1.429598</td>\n",
+ " <td id=\"T_168ea_row11_col79\" class=\"data row11 col79\" >-0.116015</td>\n",
+ " <td id=\"T_168ea_row11_col80\" class=\"data row11 col80\" >-1.454980</td>\n",
+ " <td id=\"T_168ea_row11_col81\" class=\"data row11 col81\" >0.261917</td>\n",
+ " <td id=\"T_168ea_row11_col82\" class=\"data row11 col82\" >0.444412</td>\n",
+ " <td id=\"T_168ea_row11_col83\" class=\"data row11 col83\" >0.273290</td>\n",
+ " <td id=\"T_168ea_row11_col84\" class=\"data row11 col84\" >0.844115</td>\n",
+ " <td id=\"T_168ea_row11_col85\" class=\"data row11 col85\" >0.218745</td>\n",
+ " <td id=\"T_168ea_row11_col86\" class=\"data row11 col86\" >-1.033350</td>\n",
+ " <td id=\"T_168ea_row11_col87\" class=\"data row11 col87\" >-1.188295</td>\n",
+ " <td id=\"T_168ea_row11_col88\" class=\"data row11 col88\" >0.058373</td>\n",
+ " <td id=\"T_168ea_row11_col89\" class=\"data row11 col89\" >0.800523</td>\n",
+ " <td id=\"T_168ea_row11_col90\" class=\"data row11 col90\" >-1.627068</td>\n",
+ " <td id=\"T_168ea_row11_col91\" class=\"data row11 col91\" >0.861651</td>\n",
+ " <td id=\"T_168ea_row11_col92\" class=\"data row11 col92\" >0.871018</td>\n",
+ " <td id=\"T_168ea_row11_col93\" class=\"data row11 col93\" >-0.003733</td>\n",
+ " <td id=\"T_168ea_row11_col94\" class=\"data row11 col94\" >-0.243354</td>\n",
+ " <td id=\"T_168ea_row11_col95\" class=\"data row11 col95\" >0.947296</td>\n",
+ " <td id=\"T_168ea_row11_col96\" class=\"data row11 col96\" >0.509406</td>\n",
+ " <td id=\"T_168ea_row11_col97\" class=\"data row11 col97\" >0.044546</td>\n",
+ " <td id=\"T_168ea_row11_col98\" class=\"data row11 col98\" >0.266896</td>\n",
+ " <td id=\"T_168ea_row11_col99\" class=\"data row11 col99\" >1.337165</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level1_row12\" class=\"row_heading level1 row12\" rowspan=\"4\">1</th>\n",
+ " <th id=\"T_168ea_level2_row12\" class=\"row_heading level2 row12\" >0</th>\n",
+ " <td id=\"T_168ea_row12_col0\" class=\"data row12 col0\" >0.699142</td>\n",
+ " <td id=\"T_168ea_row12_col1\" class=\"data row12 col1\" >-1.928033</td>\n",
+ " <td id=\"T_168ea_row12_col2\" class=\"data row12 col2\" >0.105363</td>\n",
+ " <td id=\"T_168ea_row12_col3\" class=\"data row12 col3\" >1.042322</td>\n",
+ " <td id=\"T_168ea_row12_col4\" class=\"data row12 col4\" >0.715206</td>\n",
+ " <td id=\"T_168ea_row12_col5\" class=\"data row12 col5\" >-0.763783</td>\n",
+ " <td id=\"T_168ea_row12_col6\" class=\"data row12 col6\" >0.098798</td>\n",
+ " <td id=\"T_168ea_row12_col7\" class=\"data row12 col7\" >-1.157898</td>\n",
+ " <td id=\"T_168ea_row12_col8\" class=\"data row12 col8\" >0.134105</td>\n",
+ " <td id=\"T_168ea_row12_col9\" class=\"data row12 col9\" >0.042041</td>\n",
+ " <td id=\"T_168ea_row12_col10\" class=\"data row12 col10\" >0.674826</td>\n",
+ " <td id=\"T_168ea_row12_col11\" class=\"data row12 col11\" >0.165649</td>\n",
+ " <td id=\"T_168ea_row12_col12\" class=\"data row12 col12\" >-1.622970</td>\n",
+ " <td id=\"T_168ea_row12_col13\" class=\"data row12 col13\" >-3.131274</td>\n",
+ " <td id=\"T_168ea_row12_col14\" class=\"data row12 col14\" >0.597649</td>\n",
+ " <td id=\"T_168ea_row12_col15\" class=\"data row12 col15\" >-1.880331</td>\n",
+ " <td id=\"T_168ea_row12_col16\" class=\"data row12 col16\" >0.663980</td>\n",
+ " <td id=\"T_168ea_row12_col17\" class=\"data row12 col17\" >-0.256033</td>\n",
+ " <td id=\"T_168ea_row12_col18\" class=\"data row12 col18\" >-1.524058</td>\n",
+ " <td id=\"T_168ea_row12_col19\" class=\"data row12 col19\" >0.492799</td>\n",
+ " <td id=\"T_168ea_row12_col20\" class=\"data row12 col20\" >0.221163</td>\n",
+ " <td id=\"T_168ea_row12_col21\" class=\"data row12 col21\" >0.429622</td>\n",
+ " <td id=\"T_168ea_row12_col22\" class=\"data row12 col22\" >-0.659584</td>\n",
+ " <td id=\"T_168ea_row12_col23\" class=\"data row12 col23\" >1.264506</td>\n",
+ " <td id=\"T_168ea_row12_col24\" class=\"data row12 col24\" >-0.032131</td>\n",
+ " <td id=\"T_168ea_row12_col25\" class=\"data row12 col25\" >-2.114907</td>\n",
+ " <td id=\"T_168ea_row12_col26\" class=\"data row12 col26\" >-0.264043</td>\n",
+ " <td id=\"T_168ea_row12_col27\" class=\"data row12 col27\" >0.457835</td>\n",
+ " <td id=\"T_168ea_row12_col28\" class=\"data row12 col28\" >-0.676837</td>\n",
+ " <td id=\"T_168ea_row12_col29\" class=\"data row12 col29\" >-0.629003</td>\n",
+ " <td id=\"T_168ea_row12_col30\" class=\"data row12 col30\" >0.489145</td>\n",
+ " <td id=\"T_168ea_row12_col31\" class=\"data row12 col31\" >-0.551686</td>\n",
+ " <td id=\"T_168ea_row12_col32\" class=\"data row12 col32\" >0.942622</td>\n",
+ " <td id=\"T_168ea_row12_col33\" class=\"data row12 col33\" >-0.512043</td>\n",
+ " <td id=\"T_168ea_row12_col34\" class=\"data row12 col34\" >-0.455893</td>\n",
+ " <td id=\"T_168ea_row12_col35\" class=\"data row12 col35\" >0.021244</td>\n",
+ " <td id=\"T_168ea_row12_col36\" class=\"data row12 col36\" >-0.178035</td>\n",
+ " <td id=\"T_168ea_row12_col37\" class=\"data row12 col37\" >-2.498073</td>\n",
+ " <td id=\"T_168ea_row12_col38\" class=\"data row12 col38\" >-0.171292</td>\n",
+ " <td id=\"T_168ea_row12_col39\" class=\"data row12 col39\" >0.323510</td>\n",
+ " <td id=\"T_168ea_row12_col40\" class=\"data row12 col40\" >-0.545163</td>\n",
+ " <td id=\"T_168ea_row12_col41\" class=\"data row12 col41\" >-0.668909</td>\n",
+ " <td id=\"T_168ea_row12_col42\" class=\"data row12 col42\" >-0.150031</td>\n",
+ " <td id=\"T_168ea_row12_col43\" class=\"data row12 col43\" >0.521620</td>\n",
+ " <td id=\"T_168ea_row12_col44\" class=\"data row12 col44\" >-0.428980</td>\n",
+ " <td id=\"T_168ea_row12_col45\" class=\"data row12 col45\" >0.676463</td>\n",
+ " <td id=\"T_168ea_row12_col46\" class=\"data row12 col46\" >0.369081</td>\n",
+ " <td id=\"T_168ea_row12_col47\" class=\"data row12 col47\" >-0.724832</td>\n",
+ " <td id=\"T_168ea_row12_col48\" class=\"data row12 col48\" >0.793542</td>\n",
+ " <td id=\"T_168ea_row12_col49\" class=\"data row12 col49\" >1.237422</td>\n",
+ " <td id=\"T_168ea_row12_col50\" class=\"data row12 col50\" >0.401275</td>\n",
+ " <td id=\"T_168ea_row12_col51\" class=\"data row12 col51\" >2.141523</td>\n",
+ " <td id=\"T_168ea_row12_col52\" class=\"data row12 col52\" >0.249012</td>\n",
+ " <td id=\"T_168ea_row12_col53\" class=\"data row12 col53\" >0.486755</td>\n",
+ " <td id=\"T_168ea_row12_col54\" class=\"data row12 col54\" >-0.163274</td>\n",
+ " <td id=\"T_168ea_row12_col55\" class=\"data row12 col55\" >0.592222</td>\n",
+ " <td id=\"T_168ea_row12_col56\" class=\"data row12 col56\" >-0.292600</td>\n",
+ " <td id=\"T_168ea_row12_col57\" class=\"data row12 col57\" >-0.547168</td>\n",
+ " <td id=\"T_168ea_row12_col58\" class=\"data row12 col58\" >0.619104</td>\n",
+ " <td id=\"T_168ea_row12_col59\" class=\"data row12 col59\" >-0.013605</td>\n",
+ " <td id=\"T_168ea_row12_col60\" class=\"data row12 col60\" >0.776734</td>\n",
+ " <td id=\"T_168ea_row12_col61\" class=\"data row12 col61\" >0.131424</td>\n",
+ " <td id=\"T_168ea_row12_col62\" class=\"data row12 col62\" >1.189480</td>\n",
+ " <td id=\"T_168ea_row12_col63\" class=\"data row12 col63\" >-0.666317</td>\n",
+ " <td id=\"T_168ea_row12_col64\" class=\"data row12 col64\" >-0.939036</td>\n",
+ " <td id=\"T_168ea_row12_col65\" class=\"data row12 col65\" >1.105515</td>\n",
+ " <td id=\"T_168ea_row12_col66\" class=\"data row12 col66\" >0.621452</td>\n",
+ " <td id=\"T_168ea_row12_col67\" class=\"data row12 col67\" >1.586605</td>\n",
+ " <td id=\"T_168ea_row12_col68\" class=\"data row12 col68\" >-0.760970</td>\n",
+ " <td id=\"T_168ea_row12_col69\" class=\"data row12 col69\" >1.649646</td>\n",
+ " <td id=\"T_168ea_row12_col70\" class=\"data row12 col70\" >0.283199</td>\n",
+ " <td id=\"T_168ea_row12_col71\" class=\"data row12 col71\" >1.275812</td>\n",
+ " <td id=\"T_168ea_row12_col72\" class=\"data row12 col72\" >-0.452012</td>\n",
+ " <td id=\"T_168ea_row12_col73\" class=\"data row12 col73\" >0.301361</td>\n",
+ " <td id=\"T_168ea_row12_col74\" class=\"data row12 col74\" >-0.976951</td>\n",
+ " <td id=\"T_168ea_row12_col75\" class=\"data row12 col75\" >-0.268106</td>\n",
+ " <td id=\"T_168ea_row12_col76\" class=\"data row12 col76\" >-0.079255</td>\n",
+ " <td id=\"T_168ea_row12_col77\" class=\"data row12 col77\" >-1.258332</td>\n",
+ " <td id=\"T_168ea_row12_col78\" class=\"data row12 col78\" >2.216658</td>\n",
+ " <td id=\"T_168ea_row12_col79\" class=\"data row12 col79\" >-1.175988</td>\n",
+ " <td id=\"T_168ea_row12_col80\" class=\"data row12 col80\" >-0.863497</td>\n",
+ " <td id=\"T_168ea_row12_col81\" class=\"data row12 col81\" >-1.653022</td>\n",
+ " <td id=\"T_168ea_row12_col82\" class=\"data row12 col82\" >-0.561514</td>\n",
+ " <td id=\"T_168ea_row12_col83\" class=\"data row12 col83\" >0.450753</td>\n",
+ " <td id=\"T_168ea_row12_col84\" class=\"data row12 col84\" >0.417200</td>\n",
+ " <td id=\"T_168ea_row12_col85\" class=\"data row12 col85\" >0.094676</td>\n",
+ " <td id=\"T_168ea_row12_col86\" class=\"data row12 col86\" >-2.231054</td>\n",
+ " <td id=\"T_168ea_row12_col87\" class=\"data row12 col87\" >1.316862</td>\n",
+ " <td id=\"T_168ea_row12_col88\" class=\"data row12 col88\" >-0.477441</td>\n",
+ " <td id=\"T_168ea_row12_col89\" class=\"data row12 col89\" >0.646654</td>\n",
+ " <td id=\"T_168ea_row12_col90\" class=\"data row12 col90\" >-0.200252</td>\n",
+ " <td id=\"T_168ea_row12_col91\" class=\"data row12 col91\" >1.074354</td>\n",
+ " <td id=\"T_168ea_row12_col92\" class=\"data row12 col92\" >-0.058176</td>\n",
+ " <td id=\"T_168ea_row12_col93\" class=\"data row12 col93\" >0.120990</td>\n",
+ " <td id=\"T_168ea_row12_col94\" class=\"data row12 col94\" >0.222522</td>\n",
+ " <td id=\"T_168ea_row12_col95\" class=\"data row12 col95\" >-0.179507</td>\n",
+ " <td id=\"T_168ea_row12_col96\" class=\"data row12 col96\" >0.421655</td>\n",
+ " <td id=\"T_168ea_row12_col97\" class=\"data row12 col97\" >-0.914341</td>\n",
+ " <td id=\"T_168ea_row12_col98\" class=\"data row12 col98\" >-0.234178</td>\n",
+ " <td id=\"T_168ea_row12_col99\" class=\"data row12 col99\" >0.741524</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row13\" class=\"row_heading level2 row13\" >1</th>\n",
+ " <td id=\"T_168ea_row13_col0\" class=\"data row13 col0\" >0.932714</td>\n",
+ " <td id=\"T_168ea_row13_col1\" class=\"data row13 col1\" >1.423761</td>\n",
+ " <td id=\"T_168ea_row13_col2\" class=\"data row13 col2\" >-1.280835</td>\n",
+ " <td id=\"T_168ea_row13_col3\" class=\"data row13 col3\" >0.347882</td>\n",
+ " <td id=\"T_168ea_row13_col4\" class=\"data row13 col4\" >-0.863171</td>\n",
+ " <td id=\"T_168ea_row13_col5\" class=\"data row13 col5\" >-0.852580</td>\n",
+ " <td id=\"T_168ea_row13_col6\" class=\"data row13 col6\" >1.044933</td>\n",
+ " <td id=\"T_168ea_row13_col7\" class=\"data row13 col7\" >2.094536</td>\n",
+ " <td id=\"T_168ea_row13_col8\" class=\"data row13 col8\" >0.806206</td>\n",
+ " <td id=\"T_168ea_row13_col9\" class=\"data row13 col9\" >0.416201</td>\n",
+ " <td id=\"T_168ea_row13_col10\" class=\"data row13 col10\" >-1.109503</td>\n",
+ " <td id=\"T_168ea_row13_col11\" class=\"data row13 col11\" >0.145302</td>\n",
+ " <td id=\"T_168ea_row13_col12\" class=\"data row13 col12\" >-0.996871</td>\n",
+ " <td id=\"T_168ea_row13_col13\" class=\"data row13 col13\" >0.325456</td>\n",
+ " <td id=\"T_168ea_row13_col14\" class=\"data row13 col14\" >-0.605081</td>\n",
+ " <td id=\"T_168ea_row13_col15\" class=\"data row13 col15\" >1.175326</td>\n",
+ " <td id=\"T_168ea_row13_col16\" class=\"data row13 col16\" >1.645054</td>\n",
+ " <td id=\"T_168ea_row13_col17\" class=\"data row13 col17\" >0.293432</td>\n",
+ " <td id=\"T_168ea_row13_col18\" class=\"data row13 col18\" >-2.766822</td>\n",
+ " <td id=\"T_168ea_row13_col19\" class=\"data row13 col19\" >1.032849</td>\n",
+ " <td id=\"T_168ea_row13_col20\" class=\"data row13 col20\" >0.079115</td>\n",
+ " <td id=\"T_168ea_row13_col21\" class=\"data row13 col21\" >-1.414132</td>\n",
+ " <td id=\"T_168ea_row13_col22\" class=\"data row13 col22\" >1.463376</td>\n",
+ " <td id=\"T_168ea_row13_col23\" class=\"data row13 col23\" >2.335486</td>\n",
+ " <td id=\"T_168ea_row13_col24\" class=\"data row13 col24\" >0.411951</td>\n",
+ " <td id=\"T_168ea_row13_col25\" class=\"data row13 col25\" >-0.048543</td>\n",
+ " <td id=\"T_168ea_row13_col26\" class=\"data row13 col26\" >0.159284</td>\n",
+ " <td id=\"T_168ea_row13_col27\" class=\"data row13 col27\" >-0.651554</td>\n",
+ " <td id=\"T_168ea_row13_col28\" class=\"data row13 col28\" >-1.093128</td>\n",
+ " <td id=\"T_168ea_row13_col29\" class=\"data row13 col29\" >1.568390</td>\n",
+ " <td id=\"T_168ea_row13_col30\" class=\"data row13 col30\" >-0.077807</td>\n",
+ " <td id=\"T_168ea_row13_col31\" class=\"data row13 col31\" >-2.390779</td>\n",
+ " <td id=\"T_168ea_row13_col32\" class=\"data row13 col32\" >-0.842346</td>\n",
+ " <td id=\"T_168ea_row13_col33\" class=\"data row13 col33\" >-0.229675</td>\n",
+ " <td id=\"T_168ea_row13_col34\" class=\"data row13 col34\" >-0.999072</td>\n",
+ " <td id=\"T_168ea_row13_col35\" class=\"data row13 col35\" >-1.367219</td>\n",
+ " <td id=\"T_168ea_row13_col36\" class=\"data row13 col36\" >-0.792042</td>\n",
+ " <td id=\"T_168ea_row13_col37\" class=\"data row13 col37\" >-1.878575</td>\n",
+ " <td id=\"T_168ea_row13_col38\" class=\"data row13 col38\" >1.451452</td>\n",
+ " <td id=\"T_168ea_row13_col39\" class=\"data row13 col39\" >1.266250</td>\n",
+ " <td id=\"T_168ea_row13_col40\" class=\"data row13 col40\" >-0.734315</td>\n",
+ " <td id=\"T_168ea_row13_col41\" class=\"data row13 col41\" >0.266152</td>\n",
+ " <td id=\"T_168ea_row13_col42\" class=\"data row13 col42\" >0.735523</td>\n",
+ " <td id=\"T_168ea_row13_col43\" class=\"data row13 col43\" >-0.430860</td>\n",
+ " <td id=\"T_168ea_row13_col44\" class=\"data row13 col44\" >0.229864</td>\n",
+ " <td id=\"T_168ea_row13_col45\" class=\"data row13 col45\" >0.850083</td>\n",
+ " <td id=\"T_168ea_row13_col46\" class=\"data row13 col46\" >-2.241241</td>\n",
+ " <td id=\"T_168ea_row13_col47\" class=\"data row13 col47\" >1.063850</td>\n",
+ " <td id=\"T_168ea_row13_col48\" class=\"data row13 col48\" >0.289409</td>\n",
+ " <td id=\"T_168ea_row13_col49\" class=\"data row13 col49\" >-0.354360</td>\n",
+ " <td id=\"T_168ea_row13_col50\" class=\"data row13 col50\" >0.113063</td>\n",
+ " <td id=\"T_168ea_row13_col51\" class=\"data row13 col51\" >-0.173006</td>\n",
+ " <td id=\"T_168ea_row13_col52\" class=\"data row13 col52\" >1.386998</td>\n",
+ " <td id=\"T_168ea_row13_col53\" class=\"data row13 col53\" >1.886236</td>\n",
+ " <td id=\"T_168ea_row13_col54\" class=\"data row13 col54\" >0.587119</td>\n",
+ " <td id=\"T_168ea_row13_col55\" class=\"data row13 col55\" >-0.961133</td>\n",
+ " <td id=\"T_168ea_row13_col56\" class=\"data row13 col56\" >0.399295</td>\n",
+ " <td id=\"T_168ea_row13_col57\" class=\"data row13 col57\" >1.461560</td>\n",
+ " <td id=\"T_168ea_row13_col58\" class=\"data row13 col58\" >0.310823</td>\n",
+ " <td id=\"T_168ea_row13_col59\" class=\"data row13 col59\" >0.280220</td>\n",
+ " <td id=\"T_168ea_row13_col60\" class=\"data row13 col60\" >-0.879103</td>\n",
+ " <td id=\"T_168ea_row13_col61\" class=\"data row13 col61\" >-1.326348</td>\n",
+ " <td id=\"T_168ea_row13_col62\" class=\"data row13 col62\" >0.003337</td>\n",
+ " <td id=\"T_168ea_row13_col63\" class=\"data row13 col63\" >-1.085908</td>\n",
+ " <td id=\"T_168ea_row13_col64\" class=\"data row13 col64\" >-0.436723</td>\n",
+ " <td id=\"T_168ea_row13_col65\" class=\"data row13 col65\" >2.111926</td>\n",
+ " <td id=\"T_168ea_row13_col66\" class=\"data row13 col66\" >0.106068</td>\n",
+ " <td id=\"T_168ea_row13_col67\" class=\"data row13 col67\" >0.615597</td>\n",
+ " <td id=\"T_168ea_row13_col68\" class=\"data row13 col68\" >2.152996</td>\n",
+ " <td id=\"T_168ea_row13_col69\" class=\"data row13 col69\" >-0.196155</td>\n",
+ " <td id=\"T_168ea_row13_col70\" class=\"data row13 col70\" >0.025747</td>\n",
+ " <td id=\"T_168ea_row13_col71\" class=\"data row13 col71\" >-0.039061</td>\n",
+ " <td id=\"T_168ea_row13_col72\" class=\"data row13 col72\" >0.656823</td>\n",
+ " <td id=\"T_168ea_row13_col73\" class=\"data row13 col73\" >-0.347105</td>\n",
+ " <td id=\"T_168ea_row13_col74\" class=\"data row13 col74\" >2.513979</td>\n",
+ " <td id=\"T_168ea_row13_col75\" class=\"data row13 col75\" >1.758070</td>\n",
+ " <td id=\"T_168ea_row13_col76\" class=\"data row13 col76\" >1.288473</td>\n",
+ " <td id=\"T_168ea_row13_col77\" class=\"data row13 col77\" >-0.739185</td>\n",
+ " <td id=\"T_168ea_row13_col78\" class=\"data row13 col78\" >-0.691592</td>\n",
+ " <td id=\"T_168ea_row13_col79\" class=\"data row13 col79\" >-0.098728</td>\n",
+ " <td id=\"T_168ea_row13_col80\" class=\"data row13 col80\" >-0.276386</td>\n",
+ " <td id=\"T_168ea_row13_col81\" class=\"data row13 col81\" >0.489981</td>\n",
+ " <td id=\"T_168ea_row13_col82\" class=\"data row13 col82\" >0.516278</td>\n",
+ " <td id=\"T_168ea_row13_col83\" class=\"data row13 col83\" >-0.838258</td>\n",
+ " <td id=\"T_168ea_row13_col84\" class=\"data row13 col84\" >0.596673</td>\n",
+ " <td id=\"T_168ea_row13_col85\" class=\"data row13 col85\" >-0.331053</td>\n",
+ " <td id=\"T_168ea_row13_col86\" class=\"data row13 col86\" >0.521174</td>\n",
+ " <td id=\"T_168ea_row13_col87\" class=\"data row13 col87\" >-0.145023</td>\n",
+ " <td id=\"T_168ea_row13_col88\" class=\"data row13 col88\" >0.836693</td>\n",
+ " <td id=\"T_168ea_row13_col89\" class=\"data row13 col89\" >-1.092166</td>\n",
+ " <td id=\"T_168ea_row13_col90\" class=\"data row13 col90\" >0.361733</td>\n",
+ " <td id=\"T_168ea_row13_col91\" class=\"data row13 col91\" >-1.169981</td>\n",
+ " <td id=\"T_168ea_row13_col92\" class=\"data row13 col92\" >0.046731</td>\n",
+ " <td id=\"T_168ea_row13_col93\" class=\"data row13 col93\" >0.655377</td>\n",
+ " <td id=\"T_168ea_row13_col94\" class=\"data row13 col94\" >-0.756852</td>\n",
+ " <td id=\"T_168ea_row13_col95\" class=\"data row13 col95\" >1.285805</td>\n",
+ " <td id=\"T_168ea_row13_col96\" class=\"data row13 col96\" >-0.095019</td>\n",
+ " <td id=\"T_168ea_row13_col97\" class=\"data row13 col97\" >0.360253</td>\n",
+ " <td id=\"T_168ea_row13_col98\" class=\"data row13 col98\" >1.370621</td>\n",
+ " <td id=\"T_168ea_row13_col99\" class=\"data row13 col99\" >0.083010</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row14\" class=\"row_heading level2 row14\" >2</th>\n",
+ " <td id=\"T_168ea_row14_col0\" class=\"data row14 col0\" >0.888893</td>\n",
+ " <td id=\"T_168ea_row14_col1\" class=\"data row14 col1\" >2.288725</td>\n",
+ " <td id=\"T_168ea_row14_col2\" class=\"data row14 col2\" >-1.032332</td>\n",
+ " <td id=\"T_168ea_row14_col3\" class=\"data row14 col3\" >0.212273</td>\n",
+ " <td id=\"T_168ea_row14_col4\" class=\"data row14 col4\" >-1.091826</td>\n",
+ " <td id=\"T_168ea_row14_col5\" class=\"data row14 col5\" >1.692498</td>\n",
+ " <td id=\"T_168ea_row14_col6\" class=\"data row14 col6\" >1.025367</td>\n",
+ " <td id=\"T_168ea_row14_col7\" class=\"data row14 col7\" >0.550854</td>\n",
+ " <td id=\"T_168ea_row14_col8\" class=\"data row14 col8\" >0.679430</td>\n",
+ " <td id=\"T_168ea_row14_col9\" class=\"data row14 col9\" >-1.335712</td>\n",
+ " <td id=\"T_168ea_row14_col10\" class=\"data row14 col10\" >-0.798341</td>\n",
+ " <td id=\"T_168ea_row14_col11\" class=\"data row14 col11\" >2.265351</td>\n",
+ " <td id=\"T_168ea_row14_col12\" class=\"data row14 col12\" >-1.006938</td>\n",
+ " <td id=\"T_168ea_row14_col13\" class=\"data row14 col13\" >2.059761</td>\n",
+ " <td id=\"T_168ea_row14_col14\" class=\"data row14 col14\" >0.420266</td>\n",
+ " <td id=\"T_168ea_row14_col15\" class=\"data row14 col15\" >-1.189657</td>\n",
+ " <td id=\"T_168ea_row14_col16\" class=\"data row14 col16\" >0.506674</td>\n",
+ " <td id=\"T_168ea_row14_col17\" class=\"data row14 col17\" >0.260847</td>\n",
+ " <td id=\"T_168ea_row14_col18\" class=\"data row14 col18\" >-0.533145</td>\n",
+ " <td id=\"T_168ea_row14_col19\" class=\"data row14 col19\" >0.727267</td>\n",
+ " <td id=\"T_168ea_row14_col20\" class=\"data row14 col20\" >1.412276</td>\n",
+ " <td id=\"T_168ea_row14_col21\" class=\"data row14 col21\" >1.482106</td>\n",
+ " <td id=\"T_168ea_row14_col22\" class=\"data row14 col22\" >-0.996258</td>\n",
+ " <td id=\"T_168ea_row14_col23\" class=\"data row14 col23\" >0.588641</td>\n",
+ " <td id=\"T_168ea_row14_col24\" class=\"data row14 col24\" >-0.412642</td>\n",
+ " <td id=\"T_168ea_row14_col25\" class=\"data row14 col25\" >-0.920733</td>\n",
+ " <td id=\"T_168ea_row14_col26\" class=\"data row14 col26\" >-0.874691</td>\n",
+ " <td id=\"T_168ea_row14_col27\" class=\"data row14 col27\" >0.839002</td>\n",
+ " <td id=\"T_168ea_row14_col28\" class=\"data row14 col28\" >0.501668</td>\n",
+ " <td id=\"T_168ea_row14_col29\" class=\"data row14 col29\" >-0.342493</td>\n",
+ " <td id=\"T_168ea_row14_col30\" class=\"data row14 col30\" >-0.533806</td>\n",
+ " <td id=\"T_168ea_row14_col31\" class=\"data row14 col31\" >-2.146352</td>\n",
+ " <td id=\"T_168ea_row14_col32\" class=\"data row14 col32\" >-0.597339</td>\n",
+ " <td id=\"T_168ea_row14_col33\" class=\"data row14 col33\" >0.115726</td>\n",
+ " <td id=\"T_168ea_row14_col34\" class=\"data row14 col34\" >0.850683</td>\n",
+ " <td id=\"T_168ea_row14_col35\" class=\"data row14 col35\" >-0.752239</td>\n",
+ " <td id=\"T_168ea_row14_col36\" class=\"data row14 col36\" >0.377263</td>\n",
+ " <td id=\"T_168ea_row14_col37\" class=\"data row14 col37\" >-0.561982</td>\n",
+ " <td id=\"T_168ea_row14_col38\" class=\"data row14 col38\" >0.262783</td>\n",
+ " <td id=\"T_168ea_row14_col39\" class=\"data row14 col39\" >-0.356676</td>\n",
+ " <td id=\"T_168ea_row14_col40\" class=\"data row14 col40\" >-0.367462</td>\n",
+ " <td id=\"T_168ea_row14_col41\" class=\"data row14 col41\" >0.753611</td>\n",
+ " <td id=\"T_168ea_row14_col42\" class=\"data row14 col42\" >-1.267414</td>\n",
+ " <td id=\"T_168ea_row14_col43\" class=\"data row14 col43\" >-1.330698</td>\n",
+ " <td id=\"T_168ea_row14_col44\" class=\"data row14 col44\" >-0.536453</td>\n",
+ " <td id=\"T_168ea_row14_col45\" class=\"data row14 col45\" >0.840938</td>\n",
+ " <td id=\"T_168ea_row14_col46\" class=\"data row14 col46\" >-0.763108</td>\n",
+ " <td id=\"T_168ea_row14_col47\" class=\"data row14 col47\" >-0.268100</td>\n",
+ " <td id=\"T_168ea_row14_col48\" class=\"data row14 col48\" >-0.677424</td>\n",
+ " <td id=\"T_168ea_row14_col49\" class=\"data row14 col49\" >1.606831</td>\n",
+ " <td id=\"T_168ea_row14_col50\" class=\"data row14 col50\" >0.151732</td>\n",
+ " <td id=\"T_168ea_row14_col51\" class=\"data row14 col51\" >-2.085701</td>\n",
+ " <td id=\"T_168ea_row14_col52\" class=\"data row14 col52\" >1.219296</td>\n",
+ " <td id=\"T_168ea_row14_col53\" class=\"data row14 col53\" >0.400863</td>\n",
+ " <td id=\"T_168ea_row14_col54\" class=\"data row14 col54\" >0.591165</td>\n",
+ " <td id=\"T_168ea_row14_col55\" class=\"data row14 col55\" >-1.485213</td>\n",
+ " <td id=\"T_168ea_row14_col56\" class=\"data row14 col56\" >1.501979</td>\n",
+ " <td id=\"T_168ea_row14_col57\" class=\"data row14 col57\" >1.196569</td>\n",
+ " <td id=\"T_168ea_row14_col58\" class=\"data row14 col58\" >-0.214154</td>\n",
+ " <td id=\"T_168ea_row14_col59\" class=\"data row14 col59\" >0.339554</td>\n",
+ " <td id=\"T_168ea_row14_col60\" class=\"data row14 col60\" >-0.034446</td>\n",
+ " <td id=\"T_168ea_row14_col61\" class=\"data row14 col61\" >1.176452</td>\n",
+ " <td id=\"T_168ea_row14_col62\" class=\"data row14 col62\" >0.546340</td>\n",
+ " <td id=\"T_168ea_row14_col63\" class=\"data row14 col63\" >-1.255630</td>\n",
+ " <td id=\"T_168ea_row14_col64\" class=\"data row14 col64\" >-1.309210</td>\n",
+ " <td id=\"T_168ea_row14_col65\" class=\"data row14 col65\" >-0.445437</td>\n",
+ " <td id=\"T_168ea_row14_col66\" class=\"data row14 col66\" >0.189437</td>\n",
+ " <td id=\"T_168ea_row14_col67\" class=\"data row14 col67\" >-0.737463</td>\n",
+ " <td id=\"T_168ea_row14_col68\" class=\"data row14 col68\" >0.843767</td>\n",
+ " <td id=\"T_168ea_row14_col69\" class=\"data row14 col69\" >-0.605632</td>\n",
+ " <td id=\"T_168ea_row14_col70\" class=\"data row14 col70\" >-0.060777</td>\n",
+ " <td id=\"T_168ea_row14_col71\" class=\"data row14 col71\" >0.409310</td>\n",
+ " <td id=\"T_168ea_row14_col72\" class=\"data row14 col72\" >1.285569</td>\n",
+ " <td id=\"T_168ea_row14_col73\" class=\"data row14 col73\" >-0.622638</td>\n",
+ " <td id=\"T_168ea_row14_col74\" class=\"data row14 col74\" >1.018193</td>\n",
+ " <td id=\"T_168ea_row14_col75\" class=\"data row14 col75\" >0.880680</td>\n",
+ " <td id=\"T_168ea_row14_col76\" class=\"data row14 col76\" >0.046805</td>\n",
+ " <td id=\"T_168ea_row14_col77\" class=\"data row14 col77\" >-1.818058</td>\n",
+ " <td id=\"T_168ea_row14_col78\" class=\"data row14 col78\" >-0.809829</td>\n",
+ " <td id=\"T_168ea_row14_col79\" class=\"data row14 col79\" >0.875224</td>\n",
+ " <td id=\"T_168ea_row14_col80\" class=\"data row14 col80\" >0.409569</td>\n",
+ " <td id=\"T_168ea_row14_col81\" class=\"data row14 col81\" >-0.116621</td>\n",
+ " <td id=\"T_168ea_row14_col82\" class=\"data row14 col82\" >-1.238919</td>\n",
+ " <td id=\"T_168ea_row14_col83\" class=\"data row14 col83\" >3.305724</td>\n",
+ " <td id=\"T_168ea_row14_col84\" class=\"data row14 col84\" >-0.024121</td>\n",
+ " <td id=\"T_168ea_row14_col85\" class=\"data row14 col85\" >-1.756500</td>\n",
+ " <td id=\"T_168ea_row14_col86\" class=\"data row14 col86\" >1.328958</td>\n",
+ " <td id=\"T_168ea_row14_col87\" class=\"data row14 col87\" >0.507593</td>\n",
+ " <td id=\"T_168ea_row14_col88\" class=\"data row14 col88\" >-0.866554</td>\n",
+ " <td id=\"T_168ea_row14_col89\" class=\"data row14 col89\" >-2.240848</td>\n",
+ " <td id=\"T_168ea_row14_col90\" class=\"data row14 col90\" >-0.661376</td>\n",
+ " <td id=\"T_168ea_row14_col91\" class=\"data row14 col91\" >-0.671824</td>\n",
+ " <td id=\"T_168ea_row14_col92\" class=\"data row14 col92\" >0.215720</td>\n",
+ " <td id=\"T_168ea_row14_col93\" class=\"data row14 col93\" >-0.296326</td>\n",
+ " <td id=\"T_168ea_row14_col94\" class=\"data row14 col94\" >0.481402</td>\n",
+ " <td id=\"T_168ea_row14_col95\" class=\"data row14 col95\" >0.829645</td>\n",
+ " <td id=\"T_168ea_row14_col96\" class=\"data row14 col96\" >-0.721025</td>\n",
+ " <td id=\"T_168ea_row14_col97\" class=\"data row14 col97\" >1.263914</td>\n",
+ " <td id=\"T_168ea_row14_col98\" class=\"data row14 col98\" >0.549047</td>\n",
+ " <td id=\"T_168ea_row14_col99\" class=\"data row14 col99\" >-1.234945</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_168ea_level2_row15\" class=\"row_heading level2 row15\" >3</th>\n",
+ " <td id=\"T_168ea_row15_col0\" class=\"data row15 col0\" >-1.978838</td>\n",
+ " <td id=\"T_168ea_row15_col1\" class=\"data row15 col1\" >0.721823</td>\n",
+ " <td id=\"T_168ea_row15_col2\" class=\"data row15 col2\" >-0.559067</td>\n",
+ " <td id=\"T_168ea_row15_col3\" class=\"data row15 col3\" >-1.235243</td>\n",
+ " <td id=\"T_168ea_row15_col4\" class=\"data row15 col4\" >0.420716</td>\n",
+ " <td id=\"T_168ea_row15_col5\" class=\"data row15 col5\" >-0.598845</td>\n",
+ " <td id=\"T_168ea_row15_col6\" class=\"data row15 col6\" >0.359576</td>\n",
+ " <td id=\"T_168ea_row15_col7\" class=\"data row15 col7\" >-0.619366</td>\n",
+ " <td id=\"T_168ea_row15_col8\" class=\"data row15 col8\" >-1.757772</td>\n",
+ " <td id=\"T_168ea_row15_col9\" class=\"data row15 col9\" >-1.156251</td>\n",
+ " <td id=\"T_168ea_row15_col10\" class=\"data row15 col10\" >0.705212</td>\n",
+ " <td id=\"T_168ea_row15_col11\" class=\"data row15 col11\" >0.875071</td>\n",
+ " <td id=\"T_168ea_row15_col12\" class=\"data row15 col12\" >-1.020376</td>\n",
+ " <td id=\"T_168ea_row15_col13\" class=\"data row15 col13\" >0.394760</td>\n",
+ " <td id=\"T_168ea_row15_col14\" class=\"data row15 col14\" >-0.147970</td>\n",
+ " <td id=\"T_168ea_row15_col15\" class=\"data row15 col15\" >0.230249</td>\n",
+ " <td id=\"T_168ea_row15_col16\" class=\"data row15 col16\" >1.355203</td>\n",
+ " <td id=\"T_168ea_row15_col17\" class=\"data row15 col17\" >1.794488</td>\n",
+ " <td id=\"T_168ea_row15_col18\" class=\"data row15 col18\" >2.678058</td>\n",
+ " <td id=\"T_168ea_row15_col19\" class=\"data row15 col19\" >-0.153565</td>\n",
+ " <td id=\"T_168ea_row15_col20\" class=\"data row15 col20\" >-0.460959</td>\n",
+ " <td id=\"T_168ea_row15_col21\" class=\"data row15 col21\" >-0.098108</td>\n",
+ " <td id=\"T_168ea_row15_col22\" class=\"data row15 col22\" >-1.407930</td>\n",
+ " <td id=\"T_168ea_row15_col23\" class=\"data row15 col23\" >-2.487702</td>\n",
+ " <td id=\"T_168ea_row15_col24\" class=\"data row15 col24\" >1.823014</td>\n",
+ " <td id=\"T_168ea_row15_col25\" class=\"data row15 col25\" >0.099873</td>\n",
+ " <td id=\"T_168ea_row15_col26\" class=\"data row15 col26\" >-0.517603</td>\n",
+ " <td id=\"T_168ea_row15_col27\" class=\"data row15 col27\" >-0.509311</td>\n",
+ " <td id=\"T_168ea_row15_col28\" class=\"data row15 col28\" >-1.833175</td>\n",
+ " <td id=\"T_168ea_row15_col29\" class=\"data row15 col29\" >-0.900906</td>\n",
+ " <td id=\"T_168ea_row15_col30\" class=\"data row15 col30\" >0.459493</td>\n",
+ " <td id=\"T_168ea_row15_col31\" class=\"data row15 col31\" >-0.655440</td>\n",
+ " <td id=\"T_168ea_row15_col32\" class=\"data row15 col32\" >1.466122</td>\n",
+ " <td id=\"T_168ea_row15_col33\" class=\"data row15 col33\" >-1.531389</td>\n",
+ " <td id=\"T_168ea_row15_col34\" class=\"data row15 col34\" >-0.422106</td>\n",
+ " <td id=\"T_168ea_row15_col35\" class=\"data row15 col35\" >0.421422</td>\n",
+ " <td id=\"T_168ea_row15_col36\" class=\"data row15 col36\" >0.578615</td>\n",
+ " <td id=\"T_168ea_row15_col37\" class=\"data row15 col37\" >0.259795</td>\n",
+ " <td id=\"T_168ea_row15_col38\" class=\"data row15 col38\" >0.018941</td>\n",
+ " <td id=\"T_168ea_row15_col39\" class=\"data row15 col39\" >-0.168726</td>\n",
+ " <td id=\"T_168ea_row15_col40\" class=\"data row15 col40\" >1.611107</td>\n",
+ " <td id=\"T_168ea_row15_col41\" class=\"data row15 col41\" >-1.586550</td>\n",
+ " <td id=\"T_168ea_row15_col42\" class=\"data row15 col42\" >-1.384941</td>\n",
+ " <td id=\"T_168ea_row15_col43\" class=\"data row15 col43\" >0.858377</td>\n",
+ " <td id=\"T_168ea_row15_col44\" class=\"data row15 col44\" >1.033242</td>\n",
+ " <td id=\"T_168ea_row15_col45\" class=\"data row15 col45\" >1.701343</td>\n",
+ " <td id=\"T_168ea_row15_col46\" class=\"data row15 col46\" >1.748344</td>\n",
+ " <td id=\"T_168ea_row15_col47\" class=\"data row15 col47\" >-0.371182</td>\n",
+ " <td id=\"T_168ea_row15_col48\" class=\"data row15 col48\" >-0.843575</td>\n",
+ " <td id=\"T_168ea_row15_col49\" class=\"data row15 col49\" >2.089641</td>\n",
+ " <td id=\"T_168ea_row15_col50\" class=\"data row15 col50\" >-0.345430</td>\n",
+ " <td id=\"T_168ea_row15_col51\" class=\"data row15 col51\" >-1.740556</td>\n",
+ " <td id=\"T_168ea_row15_col52\" class=\"data row15 col52\" >0.141915</td>\n",
+ " <td id=\"T_168ea_row15_col53\" class=\"data row15 col53\" >-2.197138</td>\n",
+ " <td id=\"T_168ea_row15_col54\" class=\"data row15 col54\" >0.689569</td>\n",
+ " <td id=\"T_168ea_row15_col55\" class=\"data row15 col55\" >-0.150025</td>\n",
+ " <td id=\"T_168ea_row15_col56\" class=\"data row15 col56\" >0.287456</td>\n",
+ " <td id=\"T_168ea_row15_col57\" class=\"data row15 col57\" >0.654016</td>\n",
+ " <td id=\"T_168ea_row15_col58\" class=\"data row15 col58\" >-1.521919</td>\n",
+ " <td id=\"T_168ea_row15_col59\" class=\"data row15 col59\" >-0.918008</td>\n",
+ " <td id=\"T_168ea_row15_col60\" class=\"data row15 col60\" >-0.587528</td>\n",
+ " <td id=\"T_168ea_row15_col61\" class=\"data row15 col61\" >0.230636</td>\n",
+ " <td id=\"T_168ea_row15_col62\" class=\"data row15 col62\" >0.262637</td>\n",
+ " <td id=\"T_168ea_row15_col63\" class=\"data row15 col63\" >0.615674</td>\n",
+ " <td id=\"T_168ea_row15_col64\" class=\"data row15 col64\" >0.600044</td>\n",
+ " <td id=\"T_168ea_row15_col65\" class=\"data row15 col65\" >-0.494699</td>\n",
+ " <td id=\"T_168ea_row15_col66\" class=\"data row15 col66\" >-0.743089</td>\n",
+ " <td id=\"T_168ea_row15_col67\" class=\"data row15 col67\" >0.220026</td>\n",
+ " <td id=\"T_168ea_row15_col68\" class=\"data row15 col68\" >-0.242207</td>\n",
+ " <td id=\"T_168ea_row15_col69\" class=\"data row15 col69\" >0.528216</td>\n",
+ " <td id=\"T_168ea_row15_col70\" class=\"data row15 col70\" >-0.328174</td>\n",
+ " <td id=\"T_168ea_row15_col71\" class=\"data row15 col71\" >-1.536517</td>\n",
+ " <td id=\"T_168ea_row15_col72\" class=\"data row15 col72\" >-1.476640</td>\n",
+ " <td id=\"T_168ea_row15_col73\" class=\"data row15 col73\" >-1.162114</td>\n",
+ " <td id=\"T_168ea_row15_col74\" class=\"data row15 col74\" >-1.260222</td>\n",
+ " <td id=\"T_168ea_row15_col75\" class=\"data row15 col75\" >1.106252</td>\n",
+ " <td id=\"T_168ea_row15_col76\" class=\"data row15 col76\" >-1.467408</td>\n",
+ " <td id=\"T_168ea_row15_col77\" class=\"data row15 col77\" >-0.349341</td>\n",
+ " <td id=\"T_168ea_row15_col78\" class=\"data row15 col78\" >-1.841217</td>\n",
+ " <td id=\"T_168ea_row15_col79\" class=\"data row15 col79\" >0.031296</td>\n",
+ " <td id=\"T_168ea_row15_col80\" class=\"data row15 col80\" >-0.076475</td>\n",
+ " <td id=\"T_168ea_row15_col81\" class=\"data row15 col81\" >-0.353383</td>\n",
+ " <td id=\"T_168ea_row15_col82\" class=\"data row15 col82\" >0.807545</td>\n",
+ " <td id=\"T_168ea_row15_col83\" class=\"data row15 col83\" >0.779064</td>\n",
+ " <td id=\"T_168ea_row15_col84\" class=\"data row15 col84\" >-2.398417</td>\n",
+ " <td id=\"T_168ea_row15_col85\" class=\"data row15 col85\" >-0.267828</td>\n",
+ " <td id=\"T_168ea_row15_col86\" class=\"data row15 col86\" >1.549734</td>\n",
+ " <td id=\"T_168ea_row15_col87\" class=\"data row15 col87\" >0.814397</td>\n",
+ " <td id=\"T_168ea_row15_col88\" class=\"data row15 col88\" >0.284770</td>\n",
+ " <td id=\"T_168ea_row15_col89\" class=\"data row15 col89\" >-0.659369</td>\n",
+ " <td id=\"T_168ea_row15_col90\" class=\"data row15 col90\" >0.761040</td>\n",
+ " <td id=\"T_168ea_row15_col91\" class=\"data row15 col91\" >-0.722067</td>\n",
+ " <td id=\"T_168ea_row15_col92\" class=\"data row15 col92\" >0.810332</td>\n",
+ " <td id=\"T_168ea_row15_col93\" class=\"data row15 col93\" >1.501295</td>\n",
+ " <td id=\"T_168ea_row15_col94\" class=\"data row15 col94\" >1.440865</td>\n",
+ " <td id=\"T_168ea_row15_col95\" class=\"data row15 col95\" >-1.367459</td>\n",
+ " <td id=\"T_168ea_row15_col96\" class=\"data row15 col96\" >-0.700301</td>\n",
+ " <td id=\"T_168ea_row15_col97\" class=\"data row15 col97\" >-1.540662</td>\n",
+ " <td id=\"T_168ea_row15_col98\" class=\"data row15 col98\" >0.159837</td>\n",
+ " <td id=\"T_168ea_row15_col99\" class=\"data row15 col99\" >-0.625415</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe632470>"
+ ]
+ },
+ "execution_count": 66,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "bigdf.index = pd.MultiIndex.from_product([[\"A\",\"B\"],[0,1],[0,1,2,3]])\n",
+ "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1,2])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### HTML Escaping\n",
+ "\n",
+ "Suppose you have to display HTML within HTML, that can be a bit of pain when the renderer can't distinguish. You can use the `escape` formatting option to handle this, and even use it within a formatter that contains HTML itself."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 67,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.168405Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.168230Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.173562Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.172984Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_5ad18\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_5ad18_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_5ad18_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_5ad18_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_5ad18_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_5ad18_row0_col0\" class=\"data row0 col0\" ><div></div></td>\n",
+ " <td id=\"T_5ad18_row0_col1\" class=\"data row0 col1\" >\"&other\"</td>\n",
+ " <td id=\"T_5ad18_row0_col2\" class=\"data row0 col2\" ><span></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe630a60>"
+ ]
+ },
+ "execution_count": 67,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4 = pd.DataFrame([['<div></div>', '\"&other\"', '<span></span>']])\n",
+ "df4.style"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 68,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.175940Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.175691Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.180358Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.179910Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_16b63\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_16b63_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_16b63_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_16b63_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_16b63_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_16b63_row0_col0\" class=\"data row0 col0\" ><div></div></td>\n",
+ " <td id=\"T_16b63_row0_col1\" class=\"data row0 col1\" >"&other"</td>\n",
+ " <td id=\"T_16b63_row0_col2\" class=\"data row0 col2\" ><span></span></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe6338b0>"
+ ]
+ },
+ "execution_count": 68,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.format(escape=\"html\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 69,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.182345Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.182141Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.186864Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.186350Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_a0a0b\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_a0a0b_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " <th id=\"T_a0a0b_level0_col1\" class=\"col_heading level0 col1\" >1</th>\n",
+ " <th id=\"T_a0a0b_level0_col2\" class=\"col_heading level0 col2\" >2</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_a0a0b_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_a0a0b_row0_col0\" class=\"data row0 col0\" ><a href=\"https://pandas.pydata.org\" target=\"_blank\"><div></div></a></td>\n",
+ " <td id=\"T_a0a0b_row0_col1\" class=\"data row0 col1\" ><a href=\"https://pandas.pydata.org\" target=\"_blank\">"&other"</a></td>\n",
+ " <td id=\"T_a0a0b_row0_col2\" class=\"data row0 col2\" ><a href=\"https://pandas.pydata.org\" target=\"_blank\"><span></span></a></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe633610>"
+ ]
+ },
+ "execution_count": 69,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.format('<a href=\"https://pandas.pydata.org\" target=\"_blank\">{}</a>', escape=\"html\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Export to Excel\n",
+ "\n",
+ "Some support (*since version 0.20.0*) is available for exporting styled `DataFrames` to Excel worksheets using the `OpenPyXL` or `XlsxWriter` engines. CSS2.2 properties handled include:\n",
+ "\n",
+ "- `background-color`\n",
+ "- `border-style` properties\n",
+ "- `border-width` properties\n",
+ "- `border-color` properties\n",
+ "- `color`\n",
+ "- `font-family`\n",
+ "- `font-style`\n",
+ "- `font-weight`\n",
+ "- `text-align`\n",
+ "- `text-decoration`\n",
+ "- `vertical-align`\n",
+ "- `white-space: nowrap`\n",
+ "\n",
+ "\n",
+ "- Shorthand and side-specific border properties are supported (e.g. `border-style` and `border-left-style`) as well as the `border` shorthands for all sides (`border: 1px solid green`) or specified sides (`border-left: 1px solid green`). Using a `border` shorthand will override any border properties set before it (See [CSS Working Group](https://drafts.csswg.org/css-backgrounds/#border-shorthands) for more details)\n",
+ "\n",
+ "\n",
+ "- Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported.\n",
+ "- The following pseudo CSS properties are also available to set Excel specific style properties:\n",
+ " - `number-format`\n",
+ " - `border-style` (for Excel-specific styles: \"hair\", \"mediumDashDot\", \"dashDotDot\", \"mediumDashDotDot\", \"dashDot\", \"slantDashDot\", or \"mediumDashed\")\n",
+ "\n",
+ "Table-level styles and data cell CSS classes are not included in the export to Excel: individual cells must have their properties mapped by the `Styler.apply` and/or `Styler.map` methods."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 70,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.189350Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.189150Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.325477Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.324842Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "df2.style.\\\n",
+ " map(style_negative, props='color:red;').\\\n",
+ " highlight_max(axis=0).\\\n",
+ " to_excel('styled.xlsx', engine='openpyxl')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A screenshot of the output:\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Export to LaTeX\n",
+ "\n",
+ "There is support (*since version 1.3.0*) to export `Styler` to LaTeX. The documentation for the [.to_latex][latex] method gives further detail and numerous examples.\n",
+ "\n",
+ "[latex]: ../reference/api/pandas.io.formats.style.Styler.to_latex.rst"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## More About CSS and HTML\n",
+ "\n",
+ "Cascading Style Sheet (CSS) language, which is designed to influence how a browser renders HTML elements, has its own peculiarities. It never reports errors: it just silently ignores them and doesn't render your objects how you intend, which can sometimes be frustrating. Here is a very brief primer on how ``Styler`` creates HTML and interacts with CSS, with advice on common pitfalls to avoid."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### CSS Classes and Ids\n",
+ "\n",
+ "The precise structure of the CSS `class` attached to each cell is as follows.\n",
+ "\n",
+ "- Cells with Index and Column names include `index_name` and `level<k>` where `k` is its level in a MultiIndex\n",
+ "- Index label cells include\n",
+ " + `row_heading`\n",
+ " + `level<k>` where `k` is the level in a MultiIndex\n",
+ " + `row<m>` where `m` is the numeric position of the row\n",
+ "- Column label cells include\n",
+ " + `col_heading`\n",
+ " + `level<k>` where `k` is the level in a MultiIndex\n",
+ " + `col<n>` where `n` is the numeric position of the column\n",
+ "- Data cells include\n",
+ " + `data`\n",
+ " + `row<m>`, where `m` is the numeric position of the cell.\n",
+ " + `col<n>`, where `n` is the numeric position of the cell.\n",
+ "- Blank cells include `blank`\n",
+ "- Trimmed cells include `col_trim` or `row_trim`\n",
+ "\n",
+ "The structure of the `id` is `T_uuid_level<k>_row<m>_col<n>` where `level<k>` is used only on headings, and headings will only have either `row<m>` or `col<n>` whichever is needed. By default we've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page. You can read more about the use of UUIDs in [Optimization](#Optimization).\n",
+ "\n",
+ "We can see an example of the HTML by calling the [.to_html()][tohtml] method.\n",
+ "\n",
+ "[tohtml]: ../reference/api/pandas.io.formats.style.Styler.to_html.rst"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 71,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.328152Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.327897Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.332384Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.331933Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "<table id=\"T_4f31a\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_4f31a_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ " <th id=\"T_4f31a_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_4f31a_level0_row0\" class=\"row_heading level0 row0\" >i1</th>\n",
+ " <td id=\"T_4f31a_row0_col0\" class=\"data row0 col0\" >1</td>\n",
+ " <td id=\"T_4f31a_row0_col1\" class=\"data row0 col1\" >2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th id=\"T_4f31a_level0_row1\" class=\"row_heading level0 row1\" >i2</th>\n",
+ " <td id=\"T_4f31a_row1_col0\" class=\"data row1 col0\" >3</td>\n",
+ " <td id=\"T_4f31a_row1_col1\" class=\"data row1 col1\" >4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### CSS Hierarchies\n",
+ "\n",
+ "The examples have shown that when CSS styles overlap, the one that comes last in the HTML render takes precedence. So the following yield different results:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 72,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.334353Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.334153Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.340159Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.339670Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_e78ee_row0_col0 {\n",
+ " color: green;\n",
+ " color: red;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_e78ee\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_e78ee_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_e78ee_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_e78ee_row0_col0\" class=\"data row0 col0\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfc48ee90>"
+ ]
+ },
+ "execution_count": 72,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4 = pd.DataFrame([['text']])\n",
+ "df4.style.map(lambda x: 'color:green;')\\\n",
+ " .map(lambda x: 'color:red;')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 73,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.342125Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.341912Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.346934Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.346505Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_ed2d6_row0_col0 {\n",
+ " color: red;\n",
+ " color: green;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_ed2d6\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_ed2d6_level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_ed2d6_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_ed2d6_row0_col0\" class=\"data row0 col0\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfc48ea70>"
+ ]
+ },
+ "execution_count": 73,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.map(lambda x: 'color:red;')\\\n",
+ " .map(lambda x: 'color:green;')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is only true for CSS rules that are equivalent in hierarchy, or importance. You can read more about [CSS specificity here](https://www.w3schools.com/css/css_specificity.asp) but for our purposes it suffices to summarize the key points:\n",
+ "\n",
+ "A CSS importance score for each HTML element is derived by starting at zero and adding:\n",
+ "\n",
+ " - 1000 for an inline style attribute\n",
+ " - 100 for each ID\n",
+ " - 10 for each attribute, class or pseudo-class\n",
+ " - 1 for each element name or pseudo-element\n",
+ " \n",
+ "Let's use this to describe the action of the following configurations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.348973Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.348797Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.353647Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.353172Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_a_ td {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_a__row0_col0 {\n",
+ " color: green;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_a_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_a__level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_a__level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_a__row0_col0\" class=\"data row0 col0\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfc48f9a0>"
+ ]
+ },
+ "execution_count": 74,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.set_uuid('a_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\\\n",
+ " .map(lambda x: 'color:green;')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This text is red because the generated selector `#T_a_ td` is worth 101 (ID plus element), whereas `#T_a__row0_col0` is only worth 100 (ID), so it is considered inferior even though it comes later in the HTML."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.355905Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.355709Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.361236Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.360831Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_b_ td {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_b_ .cls-1 {\n",
+ " color: blue;\n",
+ "}\n",
+ "#T_b__row0_col0 {\n",
+ " color: green;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_b_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_b__level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_b__level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_b__row0_col0\" class=\"data row0 col0 cls-1\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfc48ef50>"
+ ]
+ },
+ "execution_count": 75,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.set_uuid('b_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'}])\\\n",
+ " .map(lambda x: 'color:green;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the above case the text is blue because the selector `#T_b_ .cls-1` is worth 110 (ID plus class), which takes precedence."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 76,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.363320Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.363146Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.368521Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.368040Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_c_ td {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_c_ .cls-1 {\n",
+ " color: blue;\n",
+ "}\n",
+ "#T_c_ td.data {\n",
+ " color: yellow;\n",
+ "}\n",
+ "#T_c__row0_col0 {\n",
+ " color: green;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_c_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_c__level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_c__level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_c__row0_col0\" class=\"data row0 col0 cls-1\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfc48f400>"
+ ]
+ },
+ "execution_count": 76,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.set_uuid('c_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
+ " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
+ " .map(lambda x: 'color:green;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we have created another table style, this time with the selector `#T_c_ td.data` (ID plus element plus class), which gets bumped up to 111.\n",
+ "\n",
+ "If your style fails to be applied, and it's really frustrating, try the `!important` trump card."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 77,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.370465Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.370292Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.377206Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.376711Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "#T_d_ td {\n",
+ " color: red;\n",
+ "}\n",
+ "#T_d_ .cls-1 {\n",
+ " color: blue;\n",
+ "}\n",
+ "#T_d_ td.data {\n",
+ " color: yellow;\n",
+ "}\n",
+ "#T_d__row0_col0 {\n",
+ " color: green !important;\n",
+ "}\n",
+ "</style>\n",
+ "<table id=\"T_d_\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th class=\"blank level0\" > </th>\n",
+ " <th id=\"T_d__level0_col0\" class=\"col_heading level0 col0\" >0</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th id=\"T_d__level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
+ " <td id=\"T_d__row0_col0\" class=\"data row0 col0 cls-1\" >text</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n"
+ ],
+ "text/plain": [
+ "<pandas.io.formats.style.Styler at 0x7f9bfe632c20>"
+ ]
+ },
+ "execution_count": 77,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df4.style.set_uuid('d_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
+ " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
+ " .map(lambda x: 'color:green !important;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally got that green text after all!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Extensibility\n",
+ "\n",
+ "The core of pandas is, and will remain, its \"high-performance, easy-to-use data structures\".\n",
+ "With that in mind, we hope that `DataFrame.style` accomplishes two goals\n",
+ "\n",
+ "- Provide an API that is pleasing to use interactively and is \"good enough\" for many tasks\n",
+ "- Provide the foundations for dedicated libraries to build on\n",
+ "\n",
+ "If you build a great library on top of this, let us know and we'll [link](https://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it.\n",
+ "\n",
+ "### Subclassing\n",
+ "\n",
+ "If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template.\n",
+ "We'll show an example of extending the default template to insert a custom header before each table."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 78,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.379272Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.379067Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.381864Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.381358Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "from jinja2 import Environment, ChoiceLoader, FileSystemLoader\n",
+ "from IPython.display import HTML\n",
+ "from pandas.io.formats.style import Styler"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We'll use the following template:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.383867Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.383705Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.386517Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.386122Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{% extends \"html_table.tpl\" %}\n",
+ "{% block table %}\n",
+ "<h1>{{ table_title|default(\"My Table\") }}</h1>\n",
+ "{{ super() }}\n",
+ "{% endblock table %}\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "with open(\"templates/myhtml.tpl\") as f:\n",
+ " print(f.read())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now that we've created a template, we need to set up a subclass of ``Styler`` that\n",
+ "knows about it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.388727Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.388374Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.393250Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.392703Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "class MyStyler(Styler):\n",
+ " env = Environment(\n",
+ " loader=ChoiceLoader([\n",
+ " FileSystemLoader(\"templates\"), # contains ours\n",
+ " Styler.loader, # the default\n",
+ " ])\n",
+ " )\n",
+ " template_html_table = env.get_template(\"myhtml.tpl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that we include the original loader in our environment's loader.\n",
+ "That's because we extend the original template, so the Jinja environment needs\n",
+ "to be able to find it.\n",
+ "\n",
+ "Now we can use that custom styler. Its `__init__` takes a DataFrame."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 81,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.395298Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.395120Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.411644Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.411141Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<h1>My Table</h1>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_25a22\">\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank level0\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </thead>\n",
+ "\n",
+ "\n",
+ " <tbody>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ "\n",
+ " <th id=\"T_25a22_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_25a22_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_25a22_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ "\n",
+ " <th id=\"T_25a22_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_25a22_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_25a22_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_25a22_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ "\n",
+ " <td id=\"T_25a22_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ "<__main__.MyStyler at 0x7f9bfc2e8820>"
+ ]
+ },
+ "execution_count": 81,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "MyStyler(df3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Our custom template accepts a `table_title` keyword. We can provide the value in the `.to_html` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 82,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.413825Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.413517Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.418660Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.418230Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<h1>Extending Example</h1>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_a90db\">\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank level0\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </thead>\n",
+ "\n",
+ "\n",
+ " <tbody>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ "\n",
+ " <th id=\"T_a90db_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_a90db_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_a90db_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ "\n",
+ " <th id=\"T_a90db_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_a90db_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_a90db_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_a90db_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ "\n",
+ " <td id=\"T_a90db_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 82,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "HTML(MyStyler(df3).to_html(table_title=\"Extending Example\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 83,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.421513Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.420453Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.437797Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.437150Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">\n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<h1>Another Title</h1>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_bfa4e\">\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th class=\"blank level0\" > </th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_col0\" class=\"col_heading level0 col0\" >c1</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_col1\" class=\"col_heading level0 col1\" >c2</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_col2\" class=\"col_heading level0 col2\" >c3</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_col3\" class=\"col_heading level0 col3\" >c4</th>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </thead>\n",
+ "\n",
+ "\n",
+ " <tbody>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"2\">A</th>\n",
+ "\n",
+ " <th id=\"T_bfa4e_level1_row0\" class=\"row_heading level1 row0\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row0_col0\" class=\"data row0 col0\" >-1.048553</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row0_col1\" class=\"data row0 col1\" >-1.420018</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row0_col2\" class=\"data row0 col2\" >-1.706270</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row0_col3\" class=\"data row0 col3\" >1.950775</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level1_row1\" class=\"row_heading level1 row1\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row1_col0\" class=\"data row1 col0\" >-0.509652</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row1_col1\" class=\"data row1 col1\" >-0.438074</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row1_col2\" class=\"data row1 col2\" >-1.252795</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row1_col3\" class=\"data row1 col3\" >0.777490</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level0_row2\" class=\"row_heading level0 row2\" rowspan=\"2\">B</th>\n",
+ "\n",
+ " <th id=\"T_bfa4e_level1_row2\" class=\"row_heading level1 row2\" >r1</th>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row2_col0\" class=\"data row2 col0\" >-1.613898</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row2_col1\" class=\"data row2 col1\" >-0.212740</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row2_col2\" class=\"data row2 col2\" >-0.895467</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row2_col3\" class=\"data row2 col3\" >0.386902</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " <tr>\n",
+ "\n",
+ "\n",
+ " <th id=\"T_bfa4e_level1_row3\" class=\"row_heading level1 row3\" >r2</th>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row3_col0\" class=\"data row3 col0\" >-0.510805</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row3_col1\" class=\"data row3 col1\" >-1.180632</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row3_col2\" class=\"data row3 col2\" >-0.028182</td>\n",
+ "\n",
+ " <td id=\"T_bfa4e_row3_col3\" class=\"data row3 col3\" >0.428332</td>\n",
+ "\n",
+ "\n",
+ " </tr>\n",
+ "\n",
+ "\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 83,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "EasyStyler = Styler.from_custom_template(\"templates\", \"myhtml.tpl\")\n",
+ "HTML(EasyStyler(df3).to_html(table_title=\"Another Title\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Template Structure\n",
+ "\n",
+    "Here's the template structure for both the style generation template and the table generation template:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Style template:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 84,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.440058Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.439826Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.442696Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.442159Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "with open(\"templates/html_style_structure.html\") as f:\n",
+ " style_structure = f.read()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 85,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.444611Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.444412Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.447683Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.447270Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<!--\n",
+ " This is an HTML fragment that gets included into a notebook & rst document\n",
+ "\n",
+ " Inspired by nbconvert\n",
+ "\n",
+ " https://github.com/jupyter/nbconvert/blob/8ac591a0b8694147d0f34bf6392594c2811c1395/docs/source/template_structure.html\n",
+ "\n",
+ "\n",
+ " -->\n",
+ "<style type=\"text/css\">\n",
+ " /* Overrides of notebook CSS for static HTML export */\n",
+ " .template_block {\n",
+ " background-color: hsla(120, 60%, 70%, 0.2);\n",
+ " margin: 10px;\n",
+ " padding: 5px;\n",
+ " border: 1px solid hsla(120, 60%, 70%, 0.5);\n",
+ " border-left: 2px solid black;\n",
+ " }\n",
+ " .template_block pre {\n",
+ " background: transparent;\n",
+ " padding: 0;\n",
+ " }\n",
+ " .big_vertical_ellipsis {\n",
+ " font-size: 24pt;\n",
+ " }\n",
+ "</style>\n",
+ "\n",
+ "<div class=\"template_block\">before_style</div>\n",
+ "<div class=\"template_block\">style\n",
+ " <pre><style type="text/css"></pre>\n",
+ " <div class=\"template_block\">table_styles</div>\n",
+ " <div class=\"template_block\">before_cellstyle</div>\n",
+ " <div class=\"template_block\">cellstyle</div>\n",
+ " <pre></style></pre>\n",
+ "</div><!-- /style -->\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 85,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "HTML(style_structure)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Table template:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 86,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.449652Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.449484Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.452058Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.451665Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "with open(\"templates/html_table_structure.html\") as f:\n",
+ " table_structure = f.read()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 87,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.453937Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.453765Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.457631Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.457086Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<!--\n",
+ " This is an HTML fragment that gets included into a notebook & rst document\n",
+ "\n",
+ " Inspired by nbconvert\n",
+ "\n",
+ " https://github.com/jupyter/nbconvert/blob/8ac591a0b8694147d0f34bf6392594c2811c1395/docs/source/template_structure.html\n",
+ "\n",
+ "\n",
+ " -->\n",
+ "<style type=\"text/css\">\n",
+ " /* Overrides of notebook CSS for static HTML export */\n",
+ " .template_block {\n",
+ " background-color: hsla(120, 60%, 70%, 0.2);\n",
+ " margin: 10px;\n",
+ " padding: 5px;\n",
+ " border: 1px solid hsla(120, 60%, 70%, 0.5);\n",
+ " border-left: 2px solid black;\n",
+ " }\n",
+ " .template_block pre {\n",
+ " background: transparent;\n",
+ " padding: 0;\n",
+ " }\n",
+ " .big_vertical_ellipsis {\n",
+ " font-size: 24pt;\n",
+ " }\n",
+ "</style>\n",
+ "\n",
+ "<div class=\"template_block\" >before_table</div>\n",
+ "\n",
+ "<div class=\"template_block\" >table\n",
+ " <pre><table ...></pre>\n",
+ " <div class=\"template_block\">caption</div>\n",
+ "\n",
+ " <div class=\"template_block\" >thead\n",
+ " <div class=\"template_block\" >before_head_rows</div>\n",
+ " <div class=\"template_block\">head_tr (loop over headers)</div>\n",
+ " <div class=\"template_block\" >after_head_rows</div>\n",
+ " </div>\n",
+ "\n",
+ " <div class=\"template_block\" >tbody\n",
+ " <div class=\"template_block\" >before_rows</div>\n",
+ " <div class=\"template_block\">tr (loop over data rows)</div>\n",
+ " <div class=\"template_block\" >after_rows</div>\n",
+ " </div>\n",
+ " <pre></table></pre>\n",
+ "</div><!-- /table -->\n",
+ "\n",
+ "<div class=\"template_block\" >after_table</div>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 87,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "HTML(table_structure)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 88,
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-11-30T07:57:23.460006Z",
+ "iopub.status.busy": "2023-11-30T07:57:23.459770Z",
+ "iopub.status.idle": "2023-11-30T07:57:23.462163Z",
+ "shell.execute_reply": "2023-11-30T07:57:23.461758Z"
+ },
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "# # Hack to get the same style in the notebook as the\n",
+ "# # main site. This is hidden in the docs.\n",
+ "# from IPython.display import HTML\n",
+ "# with open(\"themes/nature_with_gtoc/static/nature.css_t\") as f:\n",
+ "# css = f.read()\n",
+ " \n",
+ "# HTML('<style>{}</style>'.format(css))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "state": {
+ "0b274cfa61324b629f76caa970339bad": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "10cc545d67b143da9a8ce1c13db64b16": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1352642f7b124d26872b8bb26ce521a3": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "SliderStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "SliderStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "description_width": "",
+ "handle_color": null
+ }
+ },
+ "33a552776e594fc38e513e7e728f827a": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "590d62e0d0f64cfbad664aa03bf18eed": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "IntSliderModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "IntSliderModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "IntSliderView",
+ "behavior": "drag-tap",
+ "continuous_update": true,
+ "description": "h_neg",
+ "description_allow_html": false,
+ "disabled": false,
+ "layout": "IPY_MODEL_10cc545d67b143da9a8ce1c13db64b16",
+ "max": 359,
+ "min": 0,
+ "orientation": "horizontal",
+ "readout": true,
+ "readout_format": "d",
+ "step": 1,
+ "style": "IPY_MODEL_74d348d976c0430aaa05b5232393ad0d",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 179
+ }
+ },
+ "5d75689f49944cf9a16dfd84efe805cc": {
+ "model_module": "@jupyter-widgets/output",
+ "model_module_version": "1.0.0",
+ "model_name": "OutputModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/output",
+ "_model_module_version": "1.0.0",
+ "_model_name": "OutputModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/output",
+ "_view_module_version": "1.0.0",
+ "_view_name": "OutputView",
+ "layout": "IPY_MODEL_0b274cfa61324b629f76caa970339bad",
+ "msg_id": "",
+ "outputs": [
+ {
+ "data": {
+ "text/html": "<style type=\"text/css\">\n#T_f890a_row0_col0 {\n background-color: #749691;\n color: #f1f1f1;\n}\n#T_f890a_row0_col1 {\n background-color: #b4c7c4;\n color: #000000;\n}\n#T_f890a_row0_col2, #T_f890a_row4_col3 {\n background-color: #000000;\n color: #f1f1f1;\n}\n#T_f890a_row0_col3, #T_f890a_row1_col2, #T_f890a_row5_col0, #T_f890a_row5_col3, #T_f890a_row6_col0, #T_f890a_row7_col1, #T_f890a_row8_col1, #T_f890a_row9_col2 {\n background-color: #557e79;\n color: #f1f1f1;\n}\n#T_f890a_row1_col0 {\n background-color: #6e928d;\n color: #f1f1f1;\n}\n#T_f890a_row1_col1 {\n background-color: #aec2bf;\n color: #000000;\n}\n#T_f890a_row1_col3 {\n background-color: #91aca8;\n color: #f1f1f1;\n}\n#T_f890a_row2_col0 {\n background-color: #ebf0ef;\n color: #000000;\n}\n#T_f890a_row2_col1 {\n background-color: #b3c6c3;\n color: #000000;\n}\n#T_f890a_row2_col2 {\n background-color: #cedbd9;\n color: #000000;\n}\n#T_f890a_row2_col3 {\n background-color: #a6bcb9;\n color: #000000;\n}\n#T_f890a_row3_col0 {\n background-color: #b5c8c5;\n color: #000000;\n}\n#T_f890a_row3_col1 {\n background-color: #ccd9d7;\n color: #000000;\n}\n#T_f890a_row3_col2, #T_f890a_row7_col3 {\n background-color: #c8d6d4;\n color: #000000;\n}\n#T_f890a_row3_col3 {\n background-color: #c4d2d0;\n color: #000000;\n}\n#T_f890a_row4_col0 {\n background-color: #86a4a0;\n color: #f1f1f1;\n}\n#T_f890a_row4_col1 {\n background-color: #eaefee;\n color: #000000;\n}\n#T_f890a_row4_col2 {\n background-color: #e6edec;\n color: #000000;\n}\n#T_f890a_row5_col1 {\n background-color: #9db5b1;\n color: #000000;\n}\n#T_f890a_row5_col2 {\n background-color: #698d88;\n color: #f1f1f1;\n}\n#T_f890a_row6_col1, #T_f890a_row7_col0 {\n background-color: #84a29e;\n color: #f1f1f1;\n}\n#T_f890a_row6_col2 {\n background-color: #b8c9c7;\n color: #000000;\n}\n#T_f890a_row6_col3 {\n background-color: #8da9a5;\n color: #f1f1f1;\n}\n#T_f890a_row7_col2 {\n background-color: #d1dcdb;\n color: #000000;\n}\n#T_f890a_row8_col0 {\n 
background-color: #bfcfcc;\n color: #000000;\n}\n#T_f890a_row8_col2 {\n background-color: #5e857f;\n color: #f1f1f1;\n}\n#T_f890a_row8_col3 {\n background-color: #b2c5c2;\n color: #000000;\n}\n#T_f890a_row9_col0 {\n background-color: #97b0ad;\n color: #f1f1f1;\n}\n#T_f890a_row9_col1 {\n background-color: #6c908b;\n color: #f1f1f1;\n}\n#T_f890a_row9_col3 {\n background-color: #82a09c;\n color: #f1f1f1;\n}\n</style>\n<table id=\"T_f890a\">\n <thead>\n <tr>\n <th class=\"blank level0\" > </th>\n <th id=\"T_f890a_level0_col0\" class=\"col_heading level0 col0\" >A</th>\n <th id=\"T_f890a_level0_col1\" class=\"col_heading level0 col1\" >B</th>\n <th id=\"T_f890a_level0_col2\" class=\"col_heading level0 col2\" >C</th>\n <th id=\"T_f890a_level0_col3\" class=\"col_heading level0 col3\" >D</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th id=\"T_f890a_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n <td id=\"T_f890a_row0_col0\" class=\"data row0 col0\" >1.764052</td>\n <td id=\"T_f890a_row0_col1\" class=\"data row0 col1\" >0.400157</td>\n <td id=\"T_f890a_row0_col2\" class=\"data row0 col2\" >nan</td>\n <td id=\"T_f890a_row0_col3\" class=\"data row0 col3\" >2.240893</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row1\" class=\"row_heading level0 row1\" >1</th>\n <td id=\"T_f890a_row1_col0\" class=\"data row1 col0\" >1.867558</td>\n <td id=\"T_f890a_row1_col1\" class=\"data row1 col1\" >-0.977278</td>\n <td id=\"T_f890a_row1_col2\" class=\"data row1 col2\" >0.950088</td>\n <td id=\"T_f890a_row1_col3\" class=\"data row1 col3\" >-0.151357</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row2\" class=\"row_heading level0 row2\" >2</th>\n <td id=\"T_f890a_row2_col0\" class=\"data row2 col0\" >-0.103219</td>\n <td id=\"T_f890a_row2_col1\" class=\"data row2 col1\" >0.410599</td>\n <td id=\"T_f890a_row2_col2\" class=\"data row2 col2\" >0.144044</td>\n <td id=\"T_f890a_row2_col3\" class=\"data row2 col3\" >1.454274</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row3\" 
class=\"row_heading level0 row3\" >3</th>\n <td id=\"T_f890a_row3_col0\" class=\"data row3 col0\" >0.761038</td>\n <td id=\"T_f890a_row3_col1\" class=\"data row3 col1\" >0.121675</td>\n <td id=\"T_f890a_row3_col2\" class=\"data row3 col2\" >0.443863</td>\n <td id=\"T_f890a_row3_col3\" class=\"data row3 col3\" >0.333674</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row4\" class=\"row_heading level0 row4\" >4</th>\n <td id=\"T_f890a_row4_col0\" class=\"data row4 col0\" >1.494079</td>\n <td id=\"T_f890a_row4_col1\" class=\"data row4 col1\" >-0.205158</td>\n <td id=\"T_f890a_row4_col2\" class=\"data row4 col2\" >0.313068</td>\n <td id=\"T_f890a_row4_col3\" class=\"data row4 col3\" >nan</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row5\" class=\"row_heading level0 row5\" >5</th>\n <td id=\"T_f890a_row5_col0\" class=\"data row5 col0\" >-2.552990</td>\n <td id=\"T_f890a_row5_col1\" class=\"data row5 col1\" >0.653619</td>\n <td id=\"T_f890a_row5_col2\" class=\"data row5 col2\" >0.864436</td>\n <td id=\"T_f890a_row5_col3\" class=\"data row5 col3\" >-0.742165</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row6\" class=\"row_heading level0 row6\" >6</th>\n <td id=\"T_f890a_row6_col0\" class=\"data row6 col0\" >2.269755</td>\n <td id=\"T_f890a_row6_col1\" class=\"data row6 col1\" >-1.454366</td>\n <td id=\"T_f890a_row6_col2\" class=\"data row6 col2\" >0.045759</td>\n <td id=\"T_f890a_row6_col3\" class=\"data row6 col3\" >-0.187184</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row7\" class=\"row_heading level0 row7\" >7</th>\n <td id=\"T_f890a_row7_col0\" class=\"data row7 col0\" >1.532779</td>\n <td id=\"T_f890a_row7_col1\" class=\"data row7 col1\" >1.469359</td>\n <td id=\"T_f890a_row7_col2\" class=\"data row7 col2\" >0.154947</td>\n <td id=\"T_f890a_row7_col3\" class=\"data row7 col3\" >0.378163</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row8\" class=\"row_heading level0 row8\" >8</th>\n <td id=\"T_f890a_row8_col0\" class=\"data row8 col0\" >-0.887786</td>\n <td 
id=\"T_f890a_row8_col1\" class=\"data row8 col1\" >-1.980796</td>\n <td id=\"T_f890a_row8_col2\" class=\"data row8 col2\" >-0.347912</td>\n <td id=\"T_f890a_row8_col3\" class=\"data row8 col3\" >0.156349</td>\n </tr>\n <tr>\n <th id=\"T_f890a_level0_row9\" class=\"row_heading level0 row9\" >9</th>\n <td id=\"T_f890a_row9_col0\" class=\"data row9 col0\" >1.230291</td>\n <td id=\"T_f890a_row9_col1\" class=\"data row9 col1\" >1.202380</td>\n <td id=\"T_f890a_row9_col2\" class=\"data row9 col2\" >-0.387327</td>\n <td id=\"T_f890a_row9_col3\" class=\"data row9 col3\" >-0.302303</td>\n </tr>\n </tbody>\n</table>\n",
+ "text/plain": "<pandas.io.formats.style.Styler at 0x7f9c0adbccd0>"
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "5e9e1d53423142b2b8ba72e200ef4948": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "68d854511a7840298c717636ed46e1e5": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "SliderStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "SliderStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "description_width": "",
+ "handle_color": null
+ }
+ },
+ "74d348d976c0430aaa05b5232393ad0d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "SliderStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "SliderStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "description_width": "",
+ "handle_color": null
+ }
+ },
+ "8516b65980b94f6ca3995c73bb3d6a24": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "a00bd7141c364ef5a0e4d7acaddb69f9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatSliderModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatSliderModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "FloatSliderView",
+ "behavior": "drag-tap",
+ "continuous_update": true,
+ "description": "l",
+ "description_allow_html": false,
+ "disabled": false,
+ "layout": "IPY_MODEL_33a552776e594fc38e513e7e728f827a",
+ "max": 99.9,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "readout": true,
+ "readout_format": ".2f",
+ "step": 0.1,
+ "style": "IPY_MODEL_1352642f7b124d26872b8bb26ce521a3",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 49.95
+ }
+ },
+ "af0b0fee7f814793b690e5e2c4cd730b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "VBoxModel",
+ "state": {
+ "_dom_classes": [
+ "widget-interact"
+ ],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "VBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "VBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_590d62e0d0f64cfbad664aa03bf18eed",
+ "IPY_MODEL_be6722376608436f8f073bc7e60d94be",
+ "IPY_MODEL_bf554f906e184d60a11ab1c01dc57fd4",
+ "IPY_MODEL_a00bd7141c364ef5a0e4d7acaddb69f9",
+ "IPY_MODEL_5d75689f49944cf9a16dfd84efe805cc"
+ ],
+ "layout": "IPY_MODEL_5e9e1d53423142b2b8ba72e200ef4948",
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "b1fd3ff46c7d44bb8a01c7b2fd323dc6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "SliderStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "SliderStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "description_width": "",
+ "handle_color": null
+ }
+ },
+ "be6722376608436f8f073bc7e60d94be": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "IntSliderModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "IntSliderModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "IntSliderView",
+ "behavior": "drag-tap",
+ "continuous_update": true,
+ "description": "h_pos",
+ "description_allow_html": false,
+ "disabled": false,
+ "layout": "IPY_MODEL_dd741c4353da4baa9bfad5705088933a",
+ "max": 359,
+ "min": 0,
+ "orientation": "horizontal",
+ "readout": true,
+ "readout_format": "d",
+ "step": 1,
+ "style": "IPY_MODEL_68d854511a7840298c717636ed46e1e5",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 179
+ }
+ },
+ "bf554f906e184d60a11ab1c01dc57fd4": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatSliderModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatSliderModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "FloatSliderView",
+ "behavior": "drag-tap",
+ "continuous_update": true,
+ "description": "s",
+ "description_allow_html": false,
+ "disabled": false,
+ "layout": "IPY_MODEL_8516b65980b94f6ca3995c73bb3d6a24",
+ "max": 99.9,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "readout": true,
+ "readout_format": ".2f",
+ "step": 0.1,
+ "style": "IPY_MODEL_b1fd3ff46c7d44bb8a01c7b2fd323dc6",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 49.95
+ }
+ },
+ "dd741c4353da4baa9bfad5705088933a": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ }
+ },
+ "version_major": 2,
+ "version_minor": 0
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 5786073c9d9cc..b3e0d9989fe82 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -298,7 +298,9 @@ def read(self) -> DataFrame:
frame = table.to_pandas(types_mapper=arrow_string_types_mapper())
else:
if isinstance(self.kwds.get("dtype"), dict):
- frame = table.to_pandas(types_mapper=self.kwds["dtype"].get)
+ frame = table.to_pandas(
+ types_mapper=self.kwds["dtype"].get, integer_object_nulls=True
+ )
else:
frame = table.to_pandas()
return self._finalize_pandas_output(frame)
| …={"a": "Int64"}, engine=pyarrow)
[] closes [BUG: read_csv losing precision when reading Int64[pyarrow] data with N/A values #56135](https://github.com/pandas-dev/pandas/issues/56135) and [BUG: read_csv loses precision when engine='pyarrow' and dtype Int64 #56136](https://github.com/pandas-dev/pandas/issues/56136)
[Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
Enable the integer_object_nulls option by setting it to True in the following line of code in the "arrow_parser_wrapper.py" file:
table.to_pandas(types_mapper=self.kwds["dtype"].get, integer_object_nulls=True)
This adjustment is crucial when using the "pyarrow" engine as a parameter for the 'read_csv' function. It ensures that columns containing integers (Int64) with null elements within a dataframe maintain their precision. Without this setting, numpy may treat columns with null elements by converting the entire column data to Float, leading to precision loss. Enabling 'integer_object_nulls' prevents this issue and preserves the precision of integer data.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56253 | 2023-11-30T07:51:54Z | 2023-11-30T17:32:27Z | null | 2023-11-30T17:32:28Z |
JSON code removals and cleanups | diff --git a/pandas/_libs/src/vendored/ujson/python/JSONtoObj.c b/pandas/_libs/src/vendored/ujson/python/JSONtoObj.c
index 147282c476c3b..e7c58d498f9be 100644
--- a/pandas/_libs/src/vendored/ujson/python/JSONtoObj.c
+++ b/pandas/_libs/src/vendored/ujson/python/JSONtoObj.c
@@ -45,16 +45,11 @@ Numeric decoder derived from TCL library
#include <Python.h>
#include <numpy/arrayobject.h>
-#define PRINTMARK()
-
typedef struct __PyObjectDecoder {
JSONObjectDecoder dec;
void *npyarr; // Numpy context buffer
void *npyarr_addr; // Ref to npyarr ptr to track DECREF calls
- npy_intp curdim; // Current array dimension
-
- PyArray_Descr *dtype;
} PyObjectDecoder;
typedef struct __NpyArrContext {
@@ -63,39 +58,16 @@ typedef struct __NpyArrContext {
PyArray_Dims shape;
PyObjectDecoder *dec;
-
- npy_intp i;
- npy_intp elsize;
- npy_intp elcount;
} NpyArrContext;
-// Numpy handling based on numpy internal code, specifically the function
-// PyArray_FromIter.
-
-// numpy related functions are inter-dependent so declare them all here,
-// to ensure the compiler catches any errors
-
-// standard numpy array handling
-JSOBJ Object_npyNewArray(void *prv, void *decoder);
-JSOBJ Object_npyEndArray(void *prv, JSOBJ obj);
-int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value);
-
-// for more complex dtypes (object and string) fill a standard Python list
-// and convert to a numpy array when done.
-JSOBJ Object_npyNewArrayList(void *prv, void *decoder);
-JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj);
-int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value);
-
// free the numpy context buffer
void Npy_releaseContext(NpyArrContext *npyarr) {
- PRINTMARK();
if (npyarr) {
if (npyarr->shape.ptr) {
PyObject_Free(npyarr->shape.ptr);
}
if (npyarr->dec) {
npyarr->dec->npyarr = NULL;
- npyarr->dec->curdim = 0;
}
Py_XDECREF(npyarr->labels[0]);
Py_XDECREF(npyarr->labels[1]);
@@ -104,318 +76,58 @@ void Npy_releaseContext(NpyArrContext *npyarr) {
}
}
-JSOBJ Object_npyNewArray(void *prv, void *_decoder) {
- NpyArrContext *npyarr;
- PyObjectDecoder *decoder = (PyObjectDecoder *)_decoder;
- PRINTMARK();
- if (decoder->curdim <= 0) {
- // start of array - initialise the context buffer
- npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext));
- decoder->npyarr_addr = npyarr;
-
- if (!npyarr) {
- PyErr_NoMemory();
- return NULL;
- }
-
- npyarr->dec = decoder;
- npyarr->labels[0] = npyarr->labels[1] = NULL;
-
- npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp) * NPY_MAXDIMS);
- npyarr->shape.len = 1;
- npyarr->ret = NULL;
-
- npyarr->elsize = 0;
- npyarr->elcount = 4;
- npyarr->i = 0;
- } else {
- // starting a new dimension continue the current array (and reshape
- // after)
- npyarr = (NpyArrContext *)decoder->npyarr;
- if (decoder->curdim >= npyarr->shape.len) {
- npyarr->shape.len++;
- }
- }
-
- npyarr->shape.ptr[decoder->curdim] = 0;
- decoder->curdim++;
- return npyarr;
-}
-
-PyObject *Npy_returnLabelled(NpyArrContext *npyarr) {
- PyObject *ret = npyarr->ret;
- npy_intp i;
-
- if (npyarr->labels[0] || npyarr->labels[1]) {
- // finished decoding, build tuple with values and labels
- ret = PyTuple_New(npyarr->shape.len + 1);
- for (i = 0; i < npyarr->shape.len; i++) {
- if (npyarr->labels[i]) {
- PyTuple_SET_ITEM(ret, i + 1, npyarr->labels[i]);
- npyarr->labels[i] = NULL;
- } else {
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(ret, i + 1, Py_None);
- }
- }
- PyTuple_SET_ITEM(ret, 0, npyarr->ret);
- }
-
- return ret;
-}
-
-JSOBJ Object_npyEndArray(void *prv, JSOBJ obj) {
- PyObject *ret;
- char *new_data;
- NpyArrContext *npyarr = (NpyArrContext *)obj;
- int emptyType = NPY_DEFAULT_TYPE;
- npy_intp i;
- PRINTMARK();
- if (!npyarr) {
- return NULL;
- }
-
- ret = npyarr->ret;
- i = npyarr->i;
-
- npyarr->dec->curdim--;
-
- if (i == 0 || !npyarr->ret) {
- // empty array would not have been initialised so do it now.
- if (npyarr->dec->dtype) {
- emptyType = npyarr->dec->dtype->type_num;
- }
- npyarr->ret = ret =
- PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0);
- } else if (npyarr->dec->curdim <= 0) {
- // realloc to final size
- new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize);
- if (new_data == NULL) {
- PyErr_NoMemory();
- Npy_releaseContext(npyarr);
- return NULL;
- }
- ((PyArrayObject *)ret)->data = (void *)new_data;
- // PyArray_BYTES(ret) = new_data;
- }
-
- if (npyarr->dec->curdim <= 0) {
- // finished decoding array, reshape if necessary
- if (npyarr->shape.len > 1) {
- npyarr->ret =
- PyArray_Newshape((PyArrayObject *)ret, &npyarr->shape, NPY_ANYORDER);
- Py_DECREF(ret);
- }
-
- ret = Npy_returnLabelled(npyarr);
-
- npyarr->ret = NULL;
- Npy_releaseContext(npyarr);
- }
-
- return ret;
-}
-
-int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value) {
- PyObject *type;
- PyArray_Descr *dtype;
- npy_intp i;
- char *new_data, *item;
- NpyArrContext *npyarr = (NpyArrContext *)obj;
- PRINTMARK();
- if (!npyarr) {
- return 0;
- }
-
- i = npyarr->i;
-
- npyarr->shape.ptr[npyarr->dec->curdim - 1]++;
-
- if (PyArray_Check((PyObject *)value)) {
- // multidimensional array, keep decoding values.
- return 1;
- }
-
- if (!npyarr->ret) {
- // Array not initialised yet.
- // We do it here so we can 'sniff' the data type if none was provided
- if (!npyarr->dec->dtype) {
- type = PyObject_Type(value);
- if (!PyArray_DescrConverter(type, &dtype)) {
- Py_DECREF(type);
- goto fail;
- }
- Py_INCREF(dtype);
- Py_DECREF(type);
- } else {
- dtype = PyArray_DescrNew(npyarr->dec->dtype);
- }
-
- // If it's an object or string then fill a Python list and subsequently
- // convert. Otherwise we would need to somehow mess about with
- // reference counts when renewing memory.
- npyarr->elsize = dtype->elsize;
- if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0) {
- Py_XDECREF(dtype);
-
- if (npyarr->dec->curdim > 1) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot decode multidimensional arrays with "
- "variable length elements to numpy");
- goto fail;
- }
- npyarr->elcount = 0;
- npyarr->ret = PyList_New(0);
- if (!npyarr->ret) {
- goto fail;
- }
- ((JSONObjectDecoder *)npyarr->dec)->newArray = Object_npyNewArrayList;
- ((JSONObjectDecoder *)npyarr->dec)->arrayAddItem =
- Object_npyArrayListAddItem;
- ((JSONObjectDecoder *)npyarr->dec)->endArray = Object_npyEndArrayList;
- return Object_npyArrayListAddItem(prv, obj, value);
- }
-
- npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1,
- &npyarr->elcount, NULL, NULL, 0, NULL);
-
- if (!npyarr->ret) {
- goto fail;
- }
- }
-
- if (i >= npyarr->elcount) {
- // Grow PyArray_DATA(ret):
- // this is similar for the strategy for PyListObject, but we use
- // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ...
- if (npyarr->elsize == 0) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot decode multidimensional arrays with "
- "variable length elements to numpy");
- goto fail;
- }
-
- npyarr->elcount = (i >> 1) + (i < 4 ? 4 : 2) + i;
- if (npyarr->elcount <= NPY_MAX_INTP / npyarr->elsize) {
- new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret),
- npyarr->elcount * npyarr->elsize);
- } else {
- PyErr_NoMemory();
- goto fail;
- }
- ((PyArrayObject *)npyarr->ret)->data = (void *)new_data;
-
- // PyArray_BYTES(npyarr->ret) = new_data;
- }
-
- PyArray_DIMS(npyarr->ret)[0] = i + 1;
-
- if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL ||
- PyArray_SETITEM(npyarr->ret, item, value) == -1) {
- goto fail;
- }
-
- Py_DECREF((PyObject *)value);
- npyarr->i++;
- return 1;
-
-fail:
-
- Npy_releaseContext(npyarr);
- return 0;
-}
-
-JSOBJ Object_npyNewArrayList(void *prv, void *_decoder) {
- PyObjectDecoder *decoder = (PyObjectDecoder *)_decoder;
- PRINTMARK();
- PyErr_SetString(PyExc_ValueError,
- "nesting not supported for object or variable length dtypes");
- Npy_releaseContext(decoder->npyarr);
- return NULL;
-}
-
-JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj) {
- PyObject *list, *ret;
- NpyArrContext *npyarr = (NpyArrContext *)obj;
- PRINTMARK();
- if (!npyarr) {
- return NULL;
- }
-
- // convert decoded list to numpy array
- list = (PyObject *)npyarr->ret;
- npyarr->ret = PyArray_FROM_O(list);
-
- ret = Npy_returnLabelled(npyarr);
- npyarr->ret = list;
-
- ((JSONObjectDecoder *)npyarr->dec)->newArray = Object_npyNewArray;
- ((JSONObjectDecoder *)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem;
- ((JSONObjectDecoder *)npyarr->dec)->endArray = Object_npyEndArray;
- Npy_releaseContext(npyarr);
- return ret;
-}
-
-int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value) {
- NpyArrContext *npyarr = (NpyArrContext *)obj;
- PRINTMARK();
- if (!npyarr) {
- return 0;
- }
- PyList_Append((PyObject *)npyarr->ret, value);
- Py_DECREF((PyObject *)value);
- npyarr->elcount++;
- return 1;
-}
-
-int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
+static int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
int ret = PyDict_SetItem(obj, name, value);
Py_DECREF((PyObject *)name);
Py_DECREF((PyObject *)value);
return ret == 0 ? 1 : 0;
}
-int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value) {
+static int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value) {
int ret = PyList_Append(obj, value);
Py_DECREF((PyObject *)value);
return ret == 0 ? 1 : 0;
}
-JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end) {
+static JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end) {
return PyUnicode_FromWideChar(start, (end - start));
}
-JSOBJ Object_newTrue(void *prv) { Py_RETURN_TRUE; }
+static JSOBJ Object_newTrue(void *prv) { Py_RETURN_TRUE; }
-JSOBJ Object_newFalse(void *prv) { Py_RETURN_FALSE; }
+static JSOBJ Object_newFalse(void *prv) { Py_RETURN_FALSE; }
-JSOBJ Object_newNull(void *prv) { Py_RETURN_NONE; }
+static JSOBJ Object_newNull(void *prv) { Py_RETURN_NONE; }
-JSOBJ Object_newPosInf(void *prv) { return PyFloat_FromDouble(Py_HUGE_VAL); }
+static JSOBJ Object_newPosInf(void *prv) {
+ return PyFloat_FromDouble(Py_HUGE_VAL);
+}
-JSOBJ Object_newNegInf(void *prv) { return PyFloat_FromDouble(-Py_HUGE_VAL); }
+static JSOBJ Object_newNegInf(void *prv) {
+ return PyFloat_FromDouble(-Py_HUGE_VAL);
+}
-JSOBJ Object_newObject(void *prv, void *decoder) { return PyDict_New(); }
+static JSOBJ Object_newObject(void *prv, void *decoder) { return PyDict_New(); }
-JSOBJ Object_endObject(void *prv, JSOBJ obj) { return obj; }
+static JSOBJ Object_endObject(void *prv, JSOBJ obj) { return obj; }
-JSOBJ Object_newArray(void *prv, void *decoder) { return PyList_New(0); }
+static JSOBJ Object_newArray(void *prv, void *decoder) { return PyList_New(0); }
-JSOBJ Object_endArray(void *prv, JSOBJ obj) { return obj; }
+static JSOBJ Object_endArray(void *prv, JSOBJ obj) { return obj; }
-JSOBJ Object_newInteger(void *prv, JSINT32 value) {
+static JSOBJ Object_newInteger(void *prv, JSINT32 value) {
return PyLong_FromLong((long)value);
}
-JSOBJ Object_newLong(void *prv, JSINT64 value) {
+static JSOBJ Object_newLong(void *prv, JSINT64 value) {
return PyLong_FromLongLong(value);
}
-JSOBJ Object_newUnsignedLong(void *prv, JSUINT64 value) {
+static JSOBJ Object_newUnsignedLong(void *prv, JSUINT64 value) {
return PyLong_FromUnsignedLongLong(value);
}
-JSOBJ Object_newDouble(void *prv, double value) {
+static JSOBJ Object_newDouble(void *prv, double value) {
return PyFloat_FromDouble(value);
}
@@ -451,7 +163,6 @@ PyObject *JSONToObj(PyObject *self, PyObject *args, PyObject *kwargs) {
dec.prv = NULL;
pyDecoder.dec = dec;
- pyDecoder.curdim = 0;
pyDecoder.npyarr = NULL;
pyDecoder.npyarr_addr = NULL;
| Not sure this code ever did anything - seems to be optimized out. Still more to be done but this works for a first pass
As far as the functions go, adding `static` is a best practice for functions that aren't meant to be exported from a shared library (C defaults functions to extern visibility, which you could argue is unfortunate) | https://api.github.com/repos/pandas-dev/pandas/pulls/56252 | 2023-11-30T07:10:36Z | 2023-11-30T17:31:06Z | 2023-11-30T17:31:06Z | 2023-11-30T20:57:08Z |
FIX: Solving Int64 precision loss when read_csv(StringIO(data), dtype… | diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 5786073c9d9cc..4eec761ab76de 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -272,7 +272,7 @@ def read(self) -> DataFrame:
raise ParserError(e) from e
dtype_backend = self.kwds["dtype_backend"]
-
+
# Convert all pa.null() cols -> float64 (non nullable)
# else Int64 (nullable case, see below)
if dtype_backend is lib.no_default:
| …={a:Int64}, engine=pyarrow)
- [] closes #56135 and #56136
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
Enable the integer_object_nulls option by setting it to True in the following line of code in the "arrow_parser_wrapper.py" file:
table.to_pandas(types_mapper=self.kwds["dtype"].get, integer_object_nulls=True)
This adjustment is crucial when using the "pyarrow" engine as a parameter for the 'read_csv' function. It ensures that columns containing integers (Int64) with null elements within a dataframe maintain their precision. Without this setting, numpy may treat columns with null elements by converting the entire column data to Float, leading to precision loss. Enabling 'integer_object_nulls' prevents this issue and preserves the precision of integer data. | https://api.github.com/repos/pandas-dev/pandas/pulls/56251 | 2023-11-30T06:29:08Z | 2023-12-27T19:17:16Z | null | 2023-12-27T19:17:17Z |
BUG: Read CSV on python engine fails when skiprows and chunk size are specified (#55677, #56323) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 5ee2bb1778cb1..c20e667774b3c 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -566,6 +566,8 @@ MultiIndex
I/O
^^^
+- Bug in :func:`read_csv` where ``engine="python"`` did not respect ``chunksize`` arg when ``skiprows`` was specified. (:issue:`56323`)
+- Bug in :func:`read_csv` where ``engine="python"`` was causing a ``TypeError`` when a callable ``skiprows`` and a chunk size was specified. (:issue:`55677`)
- Bug in :func:`read_csv` where ``on_bad_lines="warn"`` would write to ``stderr`` instead of raise a Python warning. This now yields a :class:`.errors.ParserWarning` (:issue:`54296`)
- Bug in :func:`read_csv` with ``engine="pyarrow"`` where ``usecols`` wasn't working with a csv with no headers (:issue:`54459`)
- Bug in :func:`read_excel`, with ``engine="xlrd"`` (``xls`` files) erroring when file contains NaNs/Infs (:issue:`54564`)
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index fae3293414b02..79e7554a5744c 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -1117,18 +1117,18 @@ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
new_rows = []
try:
if rows is not None:
- rows_to_skip = 0
- if self.skiprows is not None and self.pos is not None:
- # Only read additional rows if pos is in skiprows
- rows_to_skip = len(
- set(self.skiprows) - set(range(self.pos))
- )
-
- for _ in range(rows + rows_to_skip):
+ row_index = 0
+ row_ct = 0
+ offset = self.pos if self.pos is not None else 0
+ while row_ct < rows:
# assert for mypy, data is Iterator[str] or None, would
# error in next
assert self.data is not None
- new_rows.append(next(self.data))
+ new_row = next(self.data)
+ if not self.skipfunc(offset + row_index):
+ row_ct += 1
+ row_index += 1
+ new_rows.append(new_row)
len_new_rows = len(new_rows)
new_rows = self._remove_skipped_rows(new_rows)
@@ -1137,11 +1137,11 @@ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
rows = 0
while True:
- new_row = self._next_iter_line(row_num=self.pos + rows + 1)
+ next_row = self._next_iter_line(row_num=self.pos + rows + 1)
rows += 1
- if new_row is not None:
- new_rows.append(new_row)
+ if next_row is not None:
+ new_rows.append(next_row)
len_new_rows = len(new_rows)
except StopIteration:
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 34cae289c0f22..480d579f7f400 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -898,7 +898,7 @@ def test_skip_rows_and_n_rows():
def test_skiprows_with_iterator():
- # GH#10261
+ # GH#10261, GH#56323
data = """0
1
2
@@ -920,8 +920,8 @@ def test_skiprows_with_iterator():
)
expected_frames = [
DataFrame({"a": [3, 4]}),
- DataFrame({"a": [5, 7, 8]}, index=[2, 3, 4]),
- DataFrame({"a": []}, dtype="object"),
+ DataFrame({"a": [5, 7]}, index=[2, 3]),
+ DataFrame({"a": [8]}, index=[4]),
]
for i, result in enumerate(df_iter):
tm.assert_frame_equal(result, expected_frames[i])
diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py
index 47c3739c979a3..749bd47d5c1a3 100644
--- a/pandas/tests/io/parser/test_skiprows.py
+++ b/pandas/tests/io/parser/test_skiprows.py
@@ -301,3 +301,29 @@ def test_skip_rows_and_n_rows(all_parsers):
result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6])
expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]})
tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow
+def test_skip_rows_with_chunks(all_parsers):
+ # GH 55677
+ data = """col_a
+10
+20
+30
+40
+50
+60
+70
+80
+90
+100
+"""
+ parser = all_parsers
+ reader = parser.read_csv(
+ StringIO(data), engine=parser, skiprows=lambda x: x in [1, 4, 5], chunksize=4
+ )
+ df1 = next(reader)
+ df2 = next(reader)
+
+ tm.assert_frame_equal(df1, DataFrame({"col_a": [20, 30, 60, 70]}))
+ tm.assert_frame_equal(df2, DataFrame({"col_a": [80, 90, 100]}, index=[4, 5, 6]))
| -Added support for the python parser to handle using skiprows and chunk_size options at the same time to ensure API contract is met.
-Added a regression test to ensure this #55677 can be quickly caught in the future if it reappears.
-Fixed a flawed test case that now screens for #56323 regressions.
- [✅ ] closes #55677 (Replace xxxx with the GitHub issue number)
- [✅ ] closes #56323 (Replace xxxx with the GitHub issue number)
- [✅] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [✅] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [✅] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [✅] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56250 | 2023-11-30T02:24:35Z | 2023-12-05T00:27:37Z | 2023-12-05T00:27:37Z | 2023-12-05T00:27:44Z |
ENH: Disallowed ambiguous units to pd.Timedelta. | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8cb4b3f24d435..46e011b834bdb 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -194,6 +194,7 @@ Other enhancements
- :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`)
- Allow passing ``read_only``, ``data_only`` and ``keep_links`` arguments to openpyxl using ``engine_kwargs`` of :func:`read_excel` (:issue:`55027`)
- DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`)
+- Disallowed ambiguous units in :func:`pd.Timedelta` (:issue:`53801`)
- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`)
- Improved error message when constructing :class:`Period` with invalid offsets such as "QS" (:issue:`55785`)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 5852a7d95d994..5b6279f4521c5 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -828,6 +828,9 @@ cpdef disallow_ambiguous_unit(unit):
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
+ else:
+ unknown_kwargs = {"unit": unit}
+ raise ValueError(f"Unknown keyword arguments: {unknown_kwargs}")
cdef int64_t parse_iso_format_string(str ts) except? -1:
| - [ ] closes #53801 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56249 | 2023-11-30T01:25:56Z | 2023-12-27T19:15:17Z | null | 2023-12-27T19:15:17Z |
ENH: Disallowed ambiguous units to pd.Timedelta. | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8cb4b3f24d435..46e011b834bdb 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -194,6 +194,7 @@ Other enhancements
- :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`)
- Allow passing ``read_only``, ``data_only`` and ``keep_links`` arguments to openpyxl using ``engine_kwargs`` of :func:`read_excel` (:issue:`55027`)
- DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`)
+- Disallowed ambiguous units in :func:`pd.Timedelta` (:issue:`53801`)
- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`)
- Improved error message when constructing :class:`Period` with invalid offsets such as "QS" (:issue:`55785`)
| - [x] closes #53801 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56248 | 2023-11-30T01:17:20Z | 2023-11-30T01:19:13Z | null | 2023-11-30T01:19:13Z |
Added deprecation to the check_exact field in assert_series_equal. | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8cb4b3f24d435..6da2b026d73ac 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -397,6 +397,7 @@ Other Deprecations
- Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`)
- Deprecated the :class:`.BaseGrouper` attributes ``group_keys_seq`` and ``reconstructed_codes``; these will be removed in a future version of pandas (:issue:`56148`)
- Deprecated the :class:`.Grouping` attributes ``group_index``, ``result_index``, and ``group_arraylike``; these will be removed in a future version of pandas (:issue:`56148`)
+- Deprecated the ``check_exact`` field in :func:`.testing.assert_series_equal` (:issue:`56132`)
- Deprecated the ``errors="ignore"`` option in :func:`to_datetime`, :func:`to_timedelta`, and :func:`to_numeric`; explicitly catch exceptions instead (:issue:`54467`)
- Deprecated the ``fastpath`` keyword in the :class:`Series` constructor (:issue:`20110`)
- Deprecated the ``ordinal`` keyword in :class:`PeriodIndex`, use :meth:`PeriodIndex.from_ordinals` instead (:issue:`55960`)
@@ -405,7 +406,6 @@ Other Deprecations
- Deprecated the option ``mode.data_manager`` and the ``ArrayManager``; only the ``BlockManager`` will be available in future versions (:issue:`55043`)
- Deprecated the previous implementation of :class:`DataFrame.stack`; specify ``future_stack=True`` to adopt the future version (:issue:`53515`)
- Deprecating downcasting the results of :meth:`DataFrame.fillna`, :meth:`Series.fillna`, :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, :meth:`Series.bfill` in object-dtype cases. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`54261`)
--
.. ---------------------------------------------------------------------------
.. _whatsnew_220.performance:
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 6cad71b3dfd18..87128c59bfca4 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -6,6 +6,7 @@
Literal,
cast,
)
+import warnings
import numpy as np
@@ -885,6 +886,14 @@ def assert_series_equal(
if not check_index and check_like:
raise ValueError("check_like must be False if check_index is False")
+ if check_exact:
+ warnings.warn(
+ "The 'check_exact' parameter is deprecated and will be removed in a "
+ "future version.",
+ FutureWarning,
+ stacklevel=2,
+ )
+
# instance validation
_check_isinstance(left, right, Series)
| - [x] closes #56132 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56247 | 2023-11-30T00:50:41Z | 2023-12-27T19:17:49Z | null | 2023-12-27T19:17:49Z |
Merge pull request #1 from pandas-dev/main | - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56246 | 2023-11-30T00:35:09Z | 2023-11-30T00:35:46Z | null | 2023-11-30T00:35:51Z | |
BUG: __eq__ raising for new arrow string dtype for incompatible objects | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 70039cc697b8a..cda4da9d76c42 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -604,6 +604,7 @@ Strings
- Bug in :meth:`Series.str.find` when ``start < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56411`)
- Bug in :meth:`Series.str.replace` when ``n < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56404`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for ``string[pyarrow]`` (:issue:`54942`)
+- Bug in comparison operations for ``dtype="string[pyarrow_numpy]"`` raising if dtypes can't be compared (:issue:`56008`)
Interval
^^^^^^^^
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 32ab3054c0f51..50cd052f80abd 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -41,6 +41,7 @@
BaseStringArray,
StringDtype,
)
+from pandas.core.ops import invalid_comparison
from pandas.core.strings.object_array import ObjectStringArrayMixin
if not pa_version_under10p1:
@@ -662,7 +663,10 @@ def _convert_int_dtype(self, result):
return result
def _cmp_method(self, other, op):
- result = super()._cmp_method(other, op)
+ try:
+ result = super()._cmp_method(other, op)
+ except pa.ArrowNotImplementedError:
+ return invalid_comparison(self, other, op)
if op == operator.ne:
return result.to_numpy(np.bool_, na_value=True)
else:
diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py
index 153b4bfaaf444..d9c94e871bd4b 100644
--- a/pandas/tests/series/test_logical_ops.py
+++ b/pandas/tests/series/test_logical_ops.py
@@ -530,3 +530,19 @@ def test_int_dtype_different_index_not_bool(self):
result = ser1 ^ ser2
tm.assert_series_equal(result, expected)
+
+ def test_pyarrow_numpy_string_invalid(self):
+ # GH#56008
+ pytest.importorskip("pyarrow")
+ ser = Series([False, True])
+ ser2 = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+ result = ser == ser2
+ expected = Series(False, index=ser.index)
+ tm.assert_series_equal(result, expected)
+
+ result = ser != ser2
+ expected = Series(True, index=ser.index)
+ tm.assert_series_equal(result, expected)
+
+ with pytest.raises(TypeError, match="Invalid comparison"):
+ ser > ser2
| - [ ] closes #56008 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56245 | 2023-11-29T23:22:39Z | 2023-12-21T22:45:36Z | 2023-12-21T22:45:36Z | 2023-12-21T22:45:39Z |
DEPR: Deprecate dtype inference on pandas objects | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8209525721b98..89a03ddbef2a2 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -460,6 +460,7 @@ Other Deprecations
- Deprecated behavior of :meth:`Index.insert` with an object-dtype index silently performing type inference on the result, explicitly call ``result.infer_objects(copy=False)`` for the old behavior instead (:issue:`51363`)
- Deprecated casting non-datetimelike values (mainly strings) in :meth:`Series.isin` and :meth:`Index.isin` with ``datetime64``, ``timedelta64``, and :class:`PeriodDtype` dtypes (:issue:`53111`)
- Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`)
+- Deprecated dtype inference in :class:`Index`, :class:`Series` and :class:`DataFrame` constructors when giving a pandas input, call ``.infer_objects`` on the input to keep the current behavior (:issue:`56012`)
- Deprecated dtype inference when setting a :class:`Index` into a :class:`DataFrame`, cast explicitly instead (:issue:`56102`)
- Deprecated including the groups in computations when using :meth:`.DataFrameGroupBy.apply` and :meth:`.DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`)
- Deprecated indexing an :class:`Index` with a boolean indexer of length zero (:issue:`55820`)
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 69b2b0876fc80..672c16a85086c 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -10,6 +10,7 @@
ContextManager,
cast,
)
+import warnings
import numpy as np
@@ -285,11 +286,17 @@ def box_expected(expected, box_cls, transpose: bool = True):
else:
expected = pd.array(expected, copy=False)
elif box_cls is Index:
- expected = Index(expected)
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+ expected = Index(expected)
elif box_cls is Series:
- expected = Series(expected)
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+ expected = Series(expected)
elif box_cls is DataFrame:
- expected = Series(expected).to_frame()
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+ expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e741fa7b37f33..e1048a51089ac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -726,6 +726,10 @@ def __init__(
manager = _get_option("mode.data_manager", silent=True)
+ is_pandas_object = isinstance(data, (Series, Index, ExtensionArray))
+ data_dtype = getattr(data, "dtype", None)
+ original_dtype = dtype
+
# GH47215
if isinstance(index, set):
raise ValueError("index cannot be a set")
@@ -912,6 +916,18 @@ def __init__(
NDFrame.__init__(self, mgr)
+ if original_dtype is None and is_pandas_object and data_dtype == np.object_:
+ if self.dtypes.iloc[0] != data_dtype:
+ warnings.warn(
+ "Dtype inference on a pandas object "
+ "(Series, Index, ExtensionArray) is deprecated. The DataFrame "
+ "constructor will keep the original dtype in the future. "
+ "Call `infer_objects` on the result to get the old "
+ "behavior.",
+ FutureWarning,
+ stacklevel=2,
+ )
+
# ----------------------------------------------------------------------
def __dataframe__(
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5dc4a85ba9792..e01c4e75e9f8a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -492,6 +492,8 @@ def __new__(
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
+ is_pandas_object = isinstance(data, (ABCSeries, Index, ExtensionArray))
+
# range
if isinstance(data, (range, RangeIndex)):
result = RangeIndex(start=data, copy=copy, name=name)
@@ -571,7 +573,19 @@ def __new__(
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, arr.dtype, copy=False)
- return klass._simple_new(arr, name, refs=refs)
+ result = klass._simple_new(arr, name, refs=refs)
+ if dtype is None and is_pandas_object and data_dtype == np.object_:
+ if result.dtype != data_dtype:
+ warnings.warn(
+ "Dtype inference on a pandas object "
+ "(Series, Index, ExtensionArray) is deprecated. The Index "
+ "constructor will keep the original dtype in the future. "
+ "Call `infer_objects` on the result to get the old "
+ "behavior.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return result # type: ignore[return-value]
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e4dca97bc645d..d46068b6338c5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -424,6 +424,10 @@ def __init__(
self.name = name
return
+ is_pandas_object = isinstance(data, (Series, Index, ExtensionArray))
+ data_dtype = getattr(data, "dtype", None)
+ original_dtype = dtype
+
if isinstance(data, (ExtensionArray, np.ndarray)):
if copy is not False and using_copy_on_write():
if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)):
@@ -581,6 +585,17 @@ def __init__(
self.name = name
self._set_axis(0, index)
+ if original_dtype is None and is_pandas_object and data_dtype == np.object_:
+ if self.dtype != data_dtype:
+ warnings.warn(
+ "Dtype inference on a pandas object "
+ "(Series, Index, ExtensionArray) is deprecated. The Series "
+ "constructor will keep the original dtype in the future. "
+ "Call `infer_objects` on the result to get the old behavior.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
def _init_dict(
self, data, index: Index | None = None, dtype: DtypeObj | None = None
):
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 75866c6f6013a..1b7d632c0fa80 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -689,19 +689,18 @@ def cat(
result = cat_safe(all_cols, sep)
out: Index | Series
+ if isinstance(self._orig.dtype, CategoricalDtype):
+ # We need to infer the new categories.
+ dtype = self._orig.dtype.categories.dtype
+ else:
+ dtype = self._orig.dtype
if isinstance(self._orig, ABCIndex):
# add dtype for case that result is all-NA
- dtype = None
if isna(result).all():
- dtype = object
+ dtype = object # type: ignore[assignment]
out = Index(result, dtype=dtype, name=self._orig.name)
else: # Series
- if isinstance(self._orig.dtype, CategoricalDtype):
- # We need to infer the new categories.
- dtype = self._orig.dtype.categories.dtype # type: ignore[assignment]
- else:
- dtype = self._orig.dtype
res_ser = Series(
result, dtype=dtype, index=data.index, name=self._orig.name, copy=False
)
diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py
index 7d5c485958039..1aa458a625028 100644
--- a/pandas/tests/copy_view/test_constructors.py
+++ b/pandas/tests/copy_view/test_constructors.py
@@ -314,7 +314,8 @@ def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, con
def test_dataframe_from_series_infer_datetime(using_copy_on_write):
ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object)
- df = DataFrame(ser)
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ df = DataFrame(ser)
assert not np.shares_memory(get_array(ser), get_array(df, 0))
if using_copy_on_write:
assert df._mgr._has_no_reference(0)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e1abd0344e356..4c4760501db4e 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2768,6 +2768,23 @@ def test_frame_string_inference_block_dim(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
assert df._mgr.blocks[0].ndim == 2
+ def test_inference_on_pandas_objects(self):
+ # GH#56012
+ idx = Index([Timestamp("2019-12-31")], dtype=object)
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = DataFrame(idx, columns=["a"])
+ assert result.dtypes.iloc[0] != np.object_
+ result = DataFrame({"a": idx})
+ assert result.dtypes.iloc[0] == np.object_
+
+ ser = Series([Timestamp("2019-12-31")], dtype=object)
+
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = DataFrame(ser, columns=["a"])
+ assert result.dtypes.iloc[0] != np.object_
+ result = DataFrame({"a": ser})
+ assert result.dtypes.iloc[0] == np.object_
+
class TestDataFrameConstructorIndexInference:
def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
diff --git a/pandas/tests/indexes/base_class/test_constructors.py b/pandas/tests/indexes/base_class/test_constructors.py
index 60abbfc441e8e..fd5176a28565e 100644
--- a/pandas/tests/indexes/base_class/test_constructors.py
+++ b/pandas/tests/indexes/base_class/test_constructors.py
@@ -5,6 +5,7 @@
from pandas import (
Index,
MultiIndex,
+ Series,
)
import pandas._testing as tm
@@ -57,3 +58,16 @@ def test_index_string_inference(self):
with pd.option_context("future.infer_string", True):
ser = Index(["a", 1])
tm.assert_index_equal(ser, expected)
+
+ def test_inference_on_pandas_objects(self):
+ # GH#56012
+ idx = Index([pd.Timestamp("2019-12-31")], dtype=object)
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = Index(idx)
+ assert result.dtype != np.object_
+
+ ser = Series([pd.Timestamp("2019-12-31")], dtype=object)
+
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = Index(ser)
+ assert result.dtype != np.object_
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 185e34efdc177..666d92064c86c 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -104,7 +104,8 @@ def test_constructor_copy(self, index, using_infer_string):
)
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
- result = Index(index.astype(object))
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = Index(index.astype(object))
else:
result = Index(index)
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index 083a4c4b24adb..34465a7c12c18 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -259,9 +259,9 @@ def test_dt_accessor_limited_display_api(self):
tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))
# Period
- ser = Series(
- period_range("20130101", periods=5, freq="D", name="xxx").astype(object)
- )
+ idx = period_range("20130101", periods=5, freq="D", name="xxx").astype(object)
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ ser = Series(idx)
results = get_dir(ser)
tm.assert_almost_equal(
results, sorted(set(ok_for_period + ok_for_period_methods))
diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py
index 8f4931beae589..3913419038876 100644
--- a/pandas/tests/series/methods/test_between.py
+++ b/pandas/tests/series/methods/test_between.py
@@ -20,7 +20,7 @@ def test_between(self):
tm.assert_series_equal(result, expected)
def test_between_datetime_object_dtype(self):
- ser = Series(bdate_range("1/1/2000", periods=20).astype(object))
+ ser = Series(bdate_range("1/1/2000", periods=20), dtype=object)
ser[::2] = np.nan
result = ser[ser.between(ser[3], ser[17])]
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
index b94723b7cbddf..875ffdd3fe851 100644
--- a/pandas/tests/series/methods/test_equals.py
+++ b/pandas/tests/series/methods/test_equals.py
@@ -82,13 +82,15 @@ def test_equals_matching_nas():
left = Series([np.datetime64("NaT")], dtype=object)
right = Series([np.datetime64("NaT")], dtype=object)
assert left.equals(right)
- assert Index(left).equals(Index(right))
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.timedelta64("NaT")], dtype=object)
right = Series([np.timedelta64("NaT")], dtype=object)
assert left.equals(right)
- assert Index(left).equals(Index(right))
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.float64("NaN")], dtype=object)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 0e6f1c284a988..5f591b4b22f1c 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1316,7 +1316,8 @@ def test_constructor_periodindex(self):
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
- expected = Series(pi.astype(object))
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
@@ -2137,6 +2138,20 @@ def test_series_string_inference_na_first(self):
result = Series([pd.NA, "b"])
tm.assert_series_equal(result, expected)
+ def test_inference_on_pandas_objects(self):
+ # GH#56012
+ ser = Series([Timestamp("2019-12-31")], dtype=object)
+ with tm.assert_produces_warning(None):
+ # This doesn't do inference
+ result = Series(ser)
+ assert result.dtype == np.object_
+
+ idx = Index([Timestamp("2019-12-31")], dtype=object)
+
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ result = Series(idx)
+ assert result.dtype != np.object_
+
class TestSeriesConstructorIndexCoercion:
def test_series_constructor_datetimelike_index_coercion(self):
diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py
index 284932491a65e..c1e7ad6e02779 100644
--- a/pandas/tests/strings/test_cat.py
+++ b/pandas/tests/strings/test_cat.py
@@ -98,14 +98,18 @@ def test_str_cat_categorical(
with option_context("future.infer_string", infer_string):
s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
- s = s if box == Index else Series(s, index=s)
+ s = s if box == Index else Series(s, index=s, dtype=s.dtype)
t = Index(["b", "a", "b", "c"], dtype=dtype_target)
- expected = Index(["ab", "aa", "bb", "ac"])
+ expected = Index(
+ ["ab", "aa", "bb", "ac"], dtype=object if dtype_caller == "object" else None
+ )
expected = (
expected
if box == Index
- else Series(expected, index=Index(s, dtype=dtype_caller))
+ else Series(
+ expected, index=Index(s, dtype=dtype_caller), dtype=expected.dtype
+ )
)
# Series/Index with unaligned Index -> t.values
@@ -123,12 +127,19 @@ def test_str_cat_categorical(
# Series/Index with Series having different Index
t = Series(t.values, index=t.values)
- expected = Index(["aa", "aa", "bb", "bb", "aa"])
+ expected = Index(
+ ["aa", "aa", "bb", "bb", "aa"],
+ dtype=object if dtype_caller == "object" else None,
+ )
dtype = object if dtype_caller == "object" else s.dtype.categories.dtype
expected = (
expected
if box == Index
- else Series(expected, index=Index(expected.str[:1], dtype=dtype))
+ else Series(
+ expected,
+ index=Index(expected.str[:1], dtype=dtype),
+ dtype=expected.dtype,
+ )
)
result = s.str.cat(t, sep=sep)
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index 45741e852fef7..99a504f4188c1 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -23,6 +23,7 @@
date_range,
period_range,
)
+import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
@@ -206,7 +207,8 @@ def test_infer_freq_custom(base_delta_code_pair, constructor):
)
def test_infer_freq_index(freq, expected):
rng = period_range("1959Q2", "2009Q3", freq=freq)
- rng = Index(rng.to_timestamp("D", how="e").astype(object))
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+ rng = Index(rng.to_timestamp("D", how="e").astype(object))
assert rng.inferred_freq == expected
| - [ ] closes #56012 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This is very weird to begin with and becomes a real PITA when we infer strings as arrow backed strings | https://api.github.com/repos/pandas-dev/pandas/pulls/56244 | 2023-11-29T22:26:14Z | 2023-12-21T22:45:21Z | 2023-12-21T22:45:21Z | 2023-12-21T22:45:24Z |
ENH: Raise TypeError when converting DatetimeIndex to PeriodIndex with invalid period frequency | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8cb4b3f24d435..85dfc3ebde873 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -195,6 +195,7 @@ Other enhancements
- Allow passing ``read_only``, ``data_only`` and ``keep_links`` arguments to openpyxl using ``engine_kwargs`` of :func:`read_excel` (:issue:`55027`)
- DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`)
- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`)
+- Improved error message that appears in :meth:`DatetimeIndex.to_period` with frequencies which are not supported as period frequencies, such as "BMS" (:issue:`56243`)
- Improved error message when constructing :class:`Period` with invalid offsets such as "QS" (:issue:`55785`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 57b244e8d02e9..a8c21cfbb6e2f 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1174,7 +1174,12 @@ def dt64arr_to_periodarr(
reso = get_unit_from_dtype(data.dtype)
freq = Period._maybe_convert_freq(freq)
- base = freq._period_dtype_code
+ try:
+ base = freq._period_dtype_code
+ except (AttributeError, TypeError):
+ # AttributeError: _period_dtype_code might not exist
+ # TypeError: _period_dtype_code might intentionally raise
+ raise TypeError(f"{freq.name} is not supported as period frequency")
return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py
index aa217e895c30a..2c68ddd3d3d15 100644
--- a/pandas/tests/indexes/datetimes/methods/test_to_period.py
+++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py
@@ -230,3 +230,11 @@ def test_to_period_nofreq(self):
idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert idx.freqstr is None
tm.assert_index_equal(idx.to_period(), expected)
+
+ @pytest.mark.parametrize("freq", ["2BMS", "1SME-15"])
+ def test_to_period_offsets_not_supported(self, freq):
+ # GH#56243
+ msg = f"{freq[1:]} is not supported as period frequency"
+ ts = date_range("1/1/2012", periods=4, freq=freq)
+ with pytest.raises(TypeError, match=msg):
+ ts.to_period()
| xref #55844
Added to `dt64arr_to_periodarr` check if frequency is supported as period frequency, if not a TypeError is raised.
The reason: so far in the example below
```
>>> ts = pd.date_range('1/1/2012', periods=4, freq="BMS")
>>> ts.to_period()
```
we get
`AttributeError: 'pandas._libs.tslibs.offsets.BusinessMonthBegin' object has no attribute '_period_dtype_code'`
| https://api.github.com/repos/pandas-dev/pandas/pulls/56243 | 2023-11-29T22:03:13Z | 2023-12-04T11:14:58Z | 2023-12-04T11:14:58Z | 2023-12-04T11:15:02Z |
CoW: Remove false positive warnings for inplace operators | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 56001fabfdc9d..30646cea706e5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -12500,6 +12500,7 @@ def _inplace_method(self, other, op) -> Self:
and result._indexed_same(self)
and result.dtype == self.dtype
and not using_copy_on_write()
+ and not (warn_copy_on_write() and not warn)
):
# GH#36498 this inplace op can _actually_ be inplace.
# Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager,
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py
index 787b77a5c725a..0f27eae1a3bfc 100644
--- a/pandas/tests/frame/methods/test_quantile.py
+++ b/pandas/tests/frame/methods/test_quantile.py
@@ -110,8 +110,6 @@ def test_non_numeric_exclusion(self, interp_method, request, using_array_manager
request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set."))
tm.assert_series_equal(rs, xp)
- # TODO(CoW-warn) should not need to warn
- @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
def test_axis(self, interp_method, request, using_array_manager):
# axis
interpolation, method = interp_method
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 5b17484de9c93..3c6419ab86494 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -41,8 +41,6 @@ def test_repr():
assert result == expected
-# TODO(CoW-warn) this should NOT warn -> inplace operator triggers it
-@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
def test_groupby_std_datetimelike(warn_copy_on_write):
# GH#48481
tdi = pd.timedelta_range("1 Day", periods=10000)
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index bf0975a803dce..a0ece7cd72cd8 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -121,11 +121,7 @@ def test_setitem_cache_updating_slices(
out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
for ix, row in df.iterrows():
- # TODO(CoW-warn) should not warn
- with tm.assert_produces_warning(
- FutureWarning if warn_copy_on_write else None
- ):
- out.loc[six:eix, row["C"]] += row["D"]
+ out.loc[six:eix, row["C"]] += row["D"]
tm.assert_frame_equal(out, expected)
tm.assert_series_equal(out["A"], expected["A"])
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index baf2cdae43fe4..31263b44ed205 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -426,9 +426,7 @@ def test_iloc_getitem_slice_dups(self):
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
- # TODO(CoW-warn) this should NOT warn -> Series inplace operator
- @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
- def test_iloc_setitem(self):
+ def test_iloc_setitem(self, warn_copy_on_write):
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=np.arange(0, 8, 2),
@@ -1147,7 +1145,7 @@ def test_iloc_getitem_with_duplicates2(self):
expected = df.take([0], axis=1)
tm.assert_frame_equal(result, expected)
- def test_iloc_interval(self, warn_copy_on_write):
+ def test_iloc_interval(self):
# GH#17130
df = DataFrame({Interval(1, 2): [1, 2]})
@@ -1160,9 +1158,7 @@ def test_iloc_interval(self, warn_copy_on_write):
tm.assert_series_equal(result, expected)
result = df.copy()
- # TODO(CoW-warn) false positive
- with tm.assert_cow_warning(warn_copy_on_write):
- result.iloc[:, 0] += 1
+ result.iloc[:, 0] += 1
expected = DataFrame({Interval(1, 2): [2, 3]})
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56242 | 2023-11-29T21:11:31Z | 2023-12-04T10:13:33Z | 2023-12-04T10:13:33Z | 2023-12-04T11:05:37Z |
TST/CLN: Remove getSeriesData/makeObjectSeries/makeDatetimeIndex | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index b1918e1b1d7c2..ead00cd778d7b 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -2,7 +2,6 @@
import collections
from collections import Counter
-from datetime import datetime
from decimal import Decimal
import operator
import os
@@ -36,12 +35,10 @@
ArrowDtype,
Categorical,
DataFrame,
- DatetimeIndex,
Index,
MultiIndex,
RangeIndex,
Series,
- bdate_range,
date_range,
period_range,
timedelta_range,
@@ -348,34 +345,12 @@ def getCols(k) -> str:
return string.ascii_uppercase[:k]
-def makeDateIndex(
- k: int = 10, freq: Frequency = "B", name=None, **kwargs
-) -> DatetimeIndex:
- dt = datetime(2000, 1, 1)
- dr = bdate_range(dt, periods=k, freq=freq, name=name)
- return DatetimeIndex(dr, name=name, **kwargs)
-
-
-def makeObjectSeries(name=None) -> Series:
- data = [f"foo_{i}" for i in range(_N)]
- index = Index([f"bar_{i}" for i in range(_N)])
- return Series(data, index=index, name=name, dtype=object)
-
-
-def getSeriesData() -> dict[str, Series]:
- index = Index([f"foo_{i}" for i in range(_N)])
- return {
- c: Series(np.random.default_rng(i).standard_normal(_N), index=index)
- for i, c in enumerate(getCols(_K))
- }
-
-
def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series:
if nper is None:
nper = _N
return Series(
np.random.default_rng(2).standard_normal(nper),
- index=makeDateIndex(nper, freq=freq),
+ index=date_range("2000-01-01", periods=nper, freq=freq),
name=name,
)
@@ -390,11 +365,6 @@ def makeTimeDataFrame(nper=None, freq: Frequency = "B") -> DataFrame:
return DataFrame(data)
-def makeDataFrame() -> DataFrame:
- data = getSeriesData()
- return DataFrame(data)
-
-
def makeCustomIndex(
nentries,
nlevels,
@@ -925,16 +895,12 @@ def shares_memory(left, right) -> bool:
"get_finest_unit",
"get_obj",
"get_op_from_name",
- "getSeriesData",
"getTimeSeriesData",
"iat",
"iloc",
"loc",
"makeCustomDataframe",
"makeCustomIndex",
- "makeDataFrame",
- "makeDateIndex",
- "makeObjectSeries",
"makeTimeDataFrame",
"makeTimeSeries",
"maybe_produces_warning",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 1bc067eb32aef..9ed6f8f43ae03 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -68,6 +68,7 @@
Series,
Timedelta,
Timestamp,
+ date_range,
period_range,
timedelta_range,
)
@@ -608,15 +609,15 @@ def _create_mi_with_dt64tz_level():
"""
# GH#8367 round trip with pickle
return MultiIndex.from_product(
- [[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")],
+ [[1, 2], ["a", "b"], date_range("20130101", periods=3, tz="US/Eastern")],
names=["one", "two", "three"],
)
indices_dict = {
"string": Index([f"pandas_{i}" for i in range(100)]),
- "datetime": tm.makeDateIndex(100),
- "datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
+ "datetime": date_range("2020-01-01", periods=100),
+ "datetime-tz": date_range("2020-01-01", periods=100, tz="US/Pacific"),
"period": period_range("2020-01-01", periods=100, freq="D"),
"timedelta": timedelta_range(start="1 day", periods=100, freq="D"),
"range": RangeIndex(100),
@@ -631,7 +632,7 @@ def _create_mi_with_dt64tz_level():
"float32": Index(np.arange(100), dtype="float32"),
"float64": Index(np.arange(100), dtype="float64"),
"bool-object": Index([True, False] * 5, dtype=object),
- "bool-dtype": Index(np.random.default_rng(2).standard_normal(10) < 0),
+ "bool-dtype": Index([True, False] * 5, dtype=bool),
"complex64": Index(
np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64")
),
@@ -751,9 +752,9 @@ def object_series() -> Series:
"""
Fixture for Series of dtype object with Index of unique strings
"""
- s = tm.makeObjectSeries()
- s.name = "objects"
- return s
+ data = [f"foo_{i}" for i in range(30)]
+ index = Index([f"bar_{i}" for i in range(30)], dtype=object)
+ return Series(data, index=index, name="objects", dtype=object)
@pytest.fixture
@@ -839,27 +840,12 @@ def int_frame() -> DataFrame:
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
-
- A B C D
- vpBeWjM651 1 0 1 0
- 5JyxmrP1En -1 0 0 0
- qEDaoD49U2 -1 1 0 0
- m66TkTfsFe 0 0 0 0
- EHPaNzEUFm -1 0 -1 0
- fpRJCevQhi 2 0 0 0
- OlQvnmfi3Q 0 0 -2 0
- ... .. .. .. ..
- uB1FPlz4uP 0 0 0 1
- EcSe6yNzCU 0 0 -1 0
- L50VudaiI8 -1 1 -2 0
- y3bpw4nwIp 0 -1 0 0
- H0RdLLwrCT 1 1 0 0
- rY82K0vMwm 0 0 0 0
- 1OPIUjnkjk 2 0 0 0
-
- [30 rows x 4 columns]
"""
- return DataFrame(tm.getSeriesData()).astype("int64")
+ return DataFrame(
+ np.ones((30, 4), dtype=np.int64),
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ columns=Index(list("ABCD"), dtype=object),
+ )
@pytest.fixture
@@ -868,27 +854,12 @@ def float_frame() -> DataFrame:
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
-
- A B C D
- P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465
- qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901
- tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433
- wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651
- M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938
- QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053
- r78Jwns6dn -0.653707 0.883127 0.682199 0.206159
- ... ... ... ... ...
- IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316
- lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
- qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121
- yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962
- 65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987
- eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871
- xSucinXxuV -1.263557 0.252799 -0.552247 0.400426
-
- [30 rows x 4 columns]
- """
- return DataFrame(tm.getSeriesData())
+ """
+ return DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ columns=Index(list("ABCD"), dtype=object),
+ )
@pytest.fixture
diff --git a/pandas/tests/apply/test_numba.py b/pandas/tests/apply/test_numba.py
index 85d7baee1bdf5..57b81711ddb48 100644
--- a/pandas/tests/apply/test_numba.py
+++ b/pandas/tests/apply/test_numba.py
@@ -60,9 +60,10 @@ def test_numba_vs_python_indexing():
"reduction",
[lambda x: x.mean(), lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()],
)
-def test_numba_vs_python_reductions(float_frame, reduction, apply_axis):
- result = float_frame.apply(reduction, engine="numba", axis=apply_axis)
- expected = float_frame.apply(reduction, engine="python", axis=apply_axis)
+def test_numba_vs_python_reductions(reduction, apply_axis):
+ df = DataFrame(np.ones((4, 4), dtype=np.float64))
+ result = df.apply(reduction, engine="numba", axis=apply_axis)
+ expected = df.apply(reduction, engine="python", axis=apply_axis)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 9014ba4b6093e..4bd0e6c1c3694 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -394,7 +394,7 @@ def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
- index = tm.makeDateIndex(100)
+ index = date_range("2020-01-01", periods=10)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index 6b36f447eb7d5..7d27f940daa4c 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -169,8 +169,7 @@ def test_objarr_add_invalid(self, op, box_with_array):
# invalid ops
box = box_with_array
- obj_ser = tm.makeObjectSeries()
- obj_ser.name = "objects"
+ obj_ser = Series(list("abc"), dtype=object, name="objects")
obj_ser = tm.box_expected(obj_ser, box)
msg = "|".join(
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 88cec50c08aba..e1f8d8eca2537 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -78,8 +78,12 @@ def test_notna_notnull(notna_f):
@pytest.mark.parametrize(
"ser",
[
- tm.makeObjectSeries(),
- tm.makeTimeSeries(),
+ Series(
+ [str(i) for i in range(5)],
+ index=Index([str(i) for i in range(5)], dtype=object),
+ dtype=object,
+ ),
+ Series(range(5), date_range("2020-01-01", periods=5)),
Series(range(5), period_range("2020-01-01", periods=5)),
],
)
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index f7ed5180b46d9..99ea565e5b60c 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -3,6 +3,7 @@
from pandas import (
DataFrame,
+ Index,
NaT,
date_range,
)
@@ -44,27 +45,12 @@ def float_string_frame():
Fixture for DataFrame of floats and strings with index of unique strings
Columns are ['A', 'B', 'C', 'D', 'foo'].
-
- A B C D foo
- w3orJvq07g -1.594062 -1.084273 -1.252457 0.356460 bar
- PeukuVdmz2 0.109855 -0.955086 -0.809485 0.409747 bar
- ahp2KvwiM8 -1.533729 -0.142519 -0.154666 1.302623 bar
- 3WSJ7BUCGd 2.484964 0.213829 0.034778 -2.327831 bar
- khdAmufk0U -0.193480 -0.743518 -0.077987 0.153646 bar
- LE2DZiFlrE -0.193566 -1.343194 -0.107321 0.959978 bar
- HJXSJhVn7b 0.142590 1.257603 -0.659409 -0.223844 bar
- ... ... ... ... ... ...
- 9a1Vypttgw -1.316394 1.601354 0.173596 1.213196 bar
- h5d1gVFbEy 0.609475 1.106738 -0.155271 0.294630 bar
- mK9LsTQG92 1.303613 0.857040 -1.019153 0.369468 bar
- oOLksd9gKH 0.558219 -0.134491 -0.289869 -0.951033 bar
- 9jgoOjKyHg 0.058270 -0.496110 -0.413212 -0.852659 bar
- jZLDHclHAO 0.096298 1.267510 0.549206 -0.005235 bar
- lR0nxDp1C2 -2.119350 -0.794384 0.544118 0.145849 bar
-
- [30 rows x 5 columns]
"""
- df = DataFrame(tm.getSeriesData())
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ columns=Index(list("ABCD"), dtype=object),
+ )
df["foo"] = "bar"
return df
@@ -75,31 +61,18 @@ def mixed_float_frame():
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
-
- A B C D
- GI7bbDaEZe -0.237908 -0.246225 -0.468506 0.752993
- KGp9mFepzA -1.140809 -0.644046 -1.225586 0.801588
- VeVYLAb1l2 -1.154013 -1.677615 0.690430 -0.003731
- kmPME4WKhO 0.979578 0.998274 -0.776367 0.897607
- CPyopdXTiz 0.048119 -0.257174 0.836426 0.111266
- 0kJZQndAj0 0.274357 -0.281135 -0.344238 0.834541
- tqdwQsaHG8 -0.979716 -0.519897 0.582031 0.144710
- ... ... ... ... ...
- 7FhZTWILQj -2.906357 1.261039 -0.780273 -0.537237
- 4pUDPM4eGq -2.042512 -0.464382 -0.382080 1.132612
- B8dUgUzwTi -1.506637 -0.364435 1.087891 0.297653
- hErlVYjVv9 1.477453 -0.495515 -0.713867 1.438427
- 1BKN3o7YLs 0.127535 -0.349812 -0.881836 0.489827
- 9S4Ekn7zga 1.445518 -2.095149 0.031982 0.373204
- xN1dNn6OV6 1.425017 -0.983995 -0.363281 -0.224502
-
- [30 rows x 4 columns]
"""
- df = DataFrame(tm.getSeriesData())
- df.A = df.A.astype("float32")
- df.B = df.B.astype("float32")
- df.C = df.C.astype("float16")
- df.D = df.D.astype("float64")
+ df = DataFrame(
+ {
+ col: np.random.default_rng(2).random(30, dtype=dtype)
+ for col, dtype in zip(
+ list("ABCD"), ["float32", "float32", "float32", "float64"]
+ )
+ },
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ )
+ # not supported by numpy random
+ df["C"] = df["C"].astype("float16")
return df
@@ -109,32 +82,14 @@ def mixed_int_frame():
Fixture for DataFrame of different int types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
-
- A B C D
- mUrCZ67juP 0 1 2 2
- rw99ACYaKS 0 1 0 0
- 7QsEcpaaVU 0 1 1 1
- xkrimI2pcE 0 1 0 0
- dz01SuzoS8 0 1 255 255
- ccQkqOHX75 -1 1 0 0
- DN0iXaoDLd 0 1 0 0
- ... .. .. ... ...
- Dfb141wAaQ 1 1 254 254
- IPD8eQOVu5 0 1 0 0
- CcaKulsCmv 0 1 0 0
- rIBa8gu7E5 0 1 0 0
- RP6peZmh5o 0 1 1 1
- NMb9pipQWQ 0 1 0 0
- PqgbJEzjib 0 1 3 3
-
- [30 rows x 4 columns]
"""
- df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
- df.A = df.A.astype("int32")
- df.B = np.ones(len(df.B), dtype="uint64")
- df.C = df.C.astype("uint8")
- df.D = df.C.astype("int64")
- return df
+ return DataFrame(
+ {
+ col: np.ones(30, dtype=dtype)
+ for col, dtype in zip(list("ABCD"), ["int32", "uint64", "uint8", "int64"])
+ },
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ )
@pytest.fixture
diff --git a/pandas/tests/frame/methods/test_info.py b/pandas/tests/frame/methods/test_info.py
index 7d9e0fe90f44c..fcb7677f03f27 100644
--- a/pandas/tests/frame/methods/test_info.py
+++ b/pandas/tests/frame/methods/test_info.py
@@ -532,11 +532,11 @@ def test_info_compute_numba():
with option_context("compute.use_numba", True):
buf = StringIO()
- df.info()
+ df.info(buf=buf)
result = buf.getvalue()
buf = StringIO()
- df.info()
+ df.info(buf=buf)
expected = buf.getvalue()
assert result == expected
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 1ca9ec6feecae..b079c331eeebb 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -156,36 +156,18 @@ def bool_frame_with_na():
Fixture for DataFrame of booleans with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
-
- A B C D
- zBZxY2IDGd False False False False
- IhBWBMWllt False True True True
- ctjdvZSR6R True False True True
- AVTujptmxb False True False True
- G9lrImrSWq False False False True
- sFFwdIUfz2 NaN NaN NaN NaN
- s15ptEJnRb NaN NaN NaN NaN
- ... ... ... ... ...
- UW41KkDyZ4 True True False False
- l9l6XkOdqV True False False False
- X2MeZfzDYA False True False False
- xWkIKU7vfX False True False True
- QOhL6VmpGU False False False True
- 22PwkRJdat False True False False
- kfboQ3VeIK True False True False
-
- [30 rows x 4 columns]
"""
- df = DataFrame(tm.getSeriesData()) > 0
- df = df.astype(object)
+ df = DataFrame(
+ np.concatenate(
+ [np.ones((15, 4), dtype=bool), np.zeros((15, 4), dtype=bool)], axis=0
+ ),
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ columns=Index(list("ABCD"), dtype=object),
+ dtype=object,
+ )
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
-
- # For `any` tests we need to have at least one True before the first NaN
- # in each column
- for i in range(4):
- df.iloc[i, i] = True
return df
@@ -195,27 +177,12 @@ def float_frame_with_na():
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
-
- A B C D
- ABwBzA0ljw -1.128865 -0.897161 0.046603 0.274997
- DJiRzmbyQF 0.728869 0.233502 0.722431 -0.890872
- neMgPD5UBF 0.486072 -1.027393 -0.031553 1.449522
- 0yWA4n8VeX -1.937191 -1.142531 0.805215 -0.462018
- 3slYUbbqU1 0.153260 1.164691 1.489795 -0.545826
- soujjZ0A08 NaN NaN NaN NaN
- 7W6NLGsjB9 NaN NaN NaN NaN
- ... ... ... ... ...
- uhfeaNkCR1 -0.231210 -0.340472 0.244717 -0.901590
- n6p7GYuBIV -0.419052 1.922721 -0.125361 -0.727717
- ZhzAeY6p1y 1.234374 -1.425359 -0.827038 -0.633189
- uWdPsORyUh 0.046738 -0.980445 -1.102965 0.605503
- 3DJA6aN590 -0.091018 -1.684734 -1.100900 0.215947
- 2GBPAzdbMk -2.883405 -1.021071 1.209877 1.633083
- sHadBoyVHw -2.223032 -0.326384 0.258931 0.245517
-
- [30 rows x 4 columns]
"""
- df = DataFrame(tm.getSeriesData())
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((30, 4)),
+ index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+ columns=Index(list("ABCD"), dtype=object),
+ )
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
diff --git a/pandas/tests/indexes/datetimes/methods/test_asof.py b/pandas/tests/indexes/datetimes/methods/test_asof.py
index f52b6da5b2f07..dc92f533087bc 100644
--- a/pandas/tests/indexes/datetimes/methods/test_asof.py
+++ b/pandas/tests/indexes/datetimes/methods/test_asof.py
@@ -6,7 +6,6 @@
date_range,
isna,
)
-import pandas._testing as tm
class TestAsOf:
@@ -18,7 +17,7 @@ def test_asof_partial(self):
assert not isinstance(result, Index)
def test_asof(self):
- index = tm.makeDateIndex(100)
+ index = date_range("2020-01-01", periods=10)
dt = index[0]
assert index.asof(dt) == dt
diff --git a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py
index 3f5a18675735a..97f1003e0f43f 100644
--- a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py
+++ b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py
@@ -1,6 +1,7 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ date_range,
)
import pandas._testing as tm
@@ -21,7 +22,7 @@ def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
def test_dti_timestamp_isocalendar_fields():
- idx = tm.makeDateIndex(100)
+ idx = date_range("2020-01-01", periods=10)
expected = tuple(idx.isocalendar().iloc[-1].to_list())
result = idx[-1].isocalendar()
assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 81992219d71b4..e93fc0e2a4e2e 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -106,7 +106,7 @@ def test_dti_timetz(self, tz_naive_fixture):
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
- idx = tm.makeDateIndex(100)
+ idx = date_range("2020-01-01", periods=10)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 993f88db38ea6..3ed7fcc027a06 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -42,7 +42,7 @@ class TestDatetimeIndexSetOps:
# TODO: moved from test_datetimelike; dedup with version below
def test_union2(self, sort):
- everything = tm.makeDateIndex(10)
+ everything = date_range("2020-01-01", periods=10)
first = everything[:5]
second = everything[5:]
union = first.union(second, sort=sort)
@@ -50,7 +50,7 @@ def test_union2(self, sort):
@pytest.mark.parametrize("box", [np.array, Series, list])
def test_union3(self, sort, box):
- everything = tm.makeDateIndex(10)
+ everything = date_range("2020-01-01", periods=10)
first = everything[:5]
second = everything[5:]
@@ -203,7 +203,7 @@ def test_union_same_timezone_different_units(self):
# TODO: moved from test_datetimelike; de-duplicate with version below
def test_intersection2(self):
- first = tm.makeDateIndex(10)
+ first = date_range("2020-01-01", periods=10)
second = first[5:]
intersect = first.intersection(second)
tm.assert_index_equal(intersect, second)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 3db81c0285bd2..bb8822f047330 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -540,7 +540,9 @@ def test_map_tseries_indices_return_index(self, index):
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
- date_index = tm.makeDateIndex(24, freq="h", name="hourly")
+ date_index = DatetimeIndex(
+ date_range("2020-01-01", periods=24, freq="h"), name="hourly"
+ )
result = date_index.map(lambda x: x.hour)
expected = Index(np.arange(24, dtype="int64"), name="hourly")
tm.assert_index_equal(result, expected, exact=True)
@@ -1001,7 +1003,7 @@ def test_str_attribute(self, method):
"index",
[
Index(range(5)),
- tm.makeDateIndex(10),
+ date_range("2020-01-01", periods=10),
MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),
period_range(start="2000", end="2010", freq="Y"),
],
@@ -1065,7 +1067,7 @@ def test_indexing_doesnt_change_class(self):
def test_outer_join_sort(self):
left_index = Index(np.random.default_rng(2).permutation(15))
- right_index = tm.makeDateIndex(10)
+ right_index = date_range("2020-01-01", periods=10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how="outer")
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 5275050391ca3..37bc2812a2095 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -90,15 +90,14 @@ def assert_json_roundtrip_equal(result, expected, orient):
class TestPandasContainer:
@pytest.fixture
def categorical_frame(self):
- _seriesd = tm.getSeriesData()
-
- _cat_frame = DataFrame(_seriesd)
-
- cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
- _cat_frame.index = pd.CategoricalIndex(cat, name="E")
- _cat_frame["E"] = list(reversed(cat))
- _cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
- return _cat_frame
+ data = {
+ c: np.random.default_rng(i).standard_normal(30)
+ for i, c in enumerate(list("ABCD"))
+ }
+ cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * 15
+ data["E"] = list(reversed(cat))
+ data["sort"] = np.arange(30, dtype="int64")
+ return DataFrame(data, index=pd.CategoricalIndex(cat, name="E"))
@pytest.fixture
def datetime_series(self):
diff --git a/pandas/tests/io/pytables/test_time_series.py b/pandas/tests/io/pytables/test_time_series.py
index 4afcf5600dce6..726dd0d420347 100644
--- a/pandas/tests/io/pytables/test_time_series.py
+++ b/pandas/tests/io/pytables/test_time_series.py
@@ -8,6 +8,7 @@
DatetimeIndex,
Series,
_testing as tm,
+ date_range,
period_range,
)
from pandas.tests.io.pytables.common import ensure_clean_store
@@ -28,7 +29,7 @@ def test_store_datetime_fractional_secs(setup_path, unit):
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_tseries_indices_series(setup_path):
with ensure_clean_store(setup_path) as store:
- idx = tm.makeDateIndex(10)
+ idx = date_range("2020-01-01", periods=10)
ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
store["a"] = ser
result = store["a"]
@@ -50,7 +51,7 @@ def test_tseries_indices_series(setup_path):
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_tseries_indices_frame(setup_path):
with ensure_clean_store(setup_path) as store:
- idx = tm.makeDateIndex(10)
+ idx = date_range("2020-01-01", periods=10)
df = DataFrame(
np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx
)
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 509e0ea5c482e..f748d7c5fc758 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -260,7 +260,7 @@ def test_time_formatter(self, time, format_expected):
@pytest.mark.parametrize("freq", ("B", "ms", "s"))
def test_dateindex_conversion(self, freq, dtc):
rtol = 10**-9
- dateindex = tm.makeDateIndex(k=10, freq=freq)
+ dateindex = date_range("2020-01-01", periods=10, freq=freq)
rs = dtc.convert(dateindex, None, None)
xp = converter.mdates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, rtol=rtol)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index c8b47666e1b4a..9bf76637e1d71 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -237,7 +237,7 @@ def test_boolean(self):
with pytest.raises(TypeError, match=msg):
_check_plot_works(s.plot)
- @pytest.mark.parametrize("index", [None, tm.makeDateIndex(k=4)])
+ @pytest.mark.parametrize("index", [None, date_range("2020-01-01", periods=4)])
def test_line_area_nan_series(self, index):
values = [1, 2, np.nan, 3]
d = Series(values, index=index)
diff --git a/pandas/tests/series/indexing/test_get.py b/pandas/tests/series/indexing/test_get.py
index 61007c08b50e0..1f3711ad91903 100644
--- a/pandas/tests/series/indexing/test_get.py
+++ b/pandas/tests/series/indexing/test_get.py
@@ -3,8 +3,10 @@
import pandas as pd
from pandas import (
+ DatetimeIndex,
Index,
Series,
+ date_range,
)
import pandas._testing as tm
@@ -168,7 +170,9 @@ def test_get_with_default():
"arr",
[
np.random.default_rng(2).standard_normal(10),
- tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
+ DatetimeIndex(date_range("2020-01-01", periods=10), name="a").tz_localize(
+ tz="US/Eastern"
+ ),
],
)
def test_get_with_ea(arr):
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 5d0ef893d5723..a5170898b1720 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -71,7 +71,9 @@ def test_fillna_value_or_method(self, datetime_series):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
- ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
+ ts = Series(
+ [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
+ )
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
@@ -880,7 +882,9 @@ def test_fillna_bug(self):
tm.assert_series_equal(filled, expected)
def test_ffill(self):
- ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
+ ts = Series(
+ [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
+ )
ts.iloc[2] = np.nan
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
@@ -891,7 +895,9 @@ def test_ffill_mixed_dtypes_without_missing_data(self):
tm.assert_series_equal(series, result)
def test_bfill(self):
- ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
+ ts = Series(
+ [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
+ )
ts.iloc[2] = np.nan
tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index fe0f79b766f72..477f36bdf4214 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -52,7 +52,7 @@ def test_replace_noop_doesnt_downcast(self):
assert res.dtype == object
def test_replace(self):
- N = 100
+ N = 50
ser = pd.Series(np.random.default_rng(2).standard_normal(N))
ser[0:4] = np.nan
ser[6:10] = 0
@@ -70,7 +70,7 @@ def test_replace(self):
ser = pd.Series(
np.fabs(np.random.default_rng(2).standard_normal(N)),
- tm.makeDateIndex(N),
+ pd.date_range("2020-01-01", periods=N),
dtype=object,
)
ser[:5] = np.nan
@@ -290,10 +290,10 @@ def test_replace_Int_with_na(self, any_int_ea_dtype):
tm.assert_series_equal(result, expected)
def test_replace2(self):
- N = 100
+ N = 50
ser = pd.Series(
np.fabs(np.random.default_rng(2).standard_normal(N)),
- tm.makeDateIndex(N),
+ pd.date_range("2020-01-01", periods=N),
dtype=object,
)
ser[:5] = np.nan
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index e08f8d0c15f39..773d7e174feac 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -2147,7 +2147,7 @@ def test_series_string_inference_na_first(self):
class TestSeriesConstructorIndexCoercion:
def test_series_constructor_datetimelike_index_coercion(self):
- idx = tm.makeDateIndex(10000)
+ idx = date_range("2020-01-01", periods=5)
ser = Series(
np.random.default_rng(2).standard_normal(len(idx)), idx.astype(object)
)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56241 | 2023-11-29T19:46:43Z | 2023-11-30T17:35:19Z | 2023-11-30T17:35:19Z | 2023-11-30T17:35:22Z |
TST: dt64 units | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 149bc2d932f0e..26cbc77e4e8ae 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -467,13 +467,15 @@ def _array_strptime_with_fallback(
"""
result, tz_out = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc)
if tz_out is not None:
- dtype = DatetimeTZDtype(tz=tz_out)
+ unit = np.datetime_data(result.dtype)[0]
+ dtype = DatetimeTZDtype(tz=tz_out, unit=unit)
dta = DatetimeArray._simple_new(result, dtype=dtype)
if utc:
dta = dta.tz_convert("UTC")
return Index(dta, name=name)
elif result.dtype != object and utc:
- res = Index(result, dtype="M8[ns, UTC]", name=name)
+ unit = np.datetime_data(result.dtype)[0]
+ res = Index(result, dtype=f"M8[{unit}, UTC]", name=name)
return res
return Index(result, dtype=result.dtype, name=name)
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index e88bde07aee90..73c6b4a1b2a0d 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -746,20 +746,10 @@ def test_timedelta_ops_with_missing_values(self):
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
- msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
- with pytest.raises(TypeError, match=msg):
- # Passing datetime64-dtype data to TimedeltaIndex is no longer
- # supported GH#29794
- pd.to_timedelta(Series([NaT])) # TODO: belongs elsewhere?
-
sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]"))
df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)
- with pytest.raises(TypeError, match=msg):
- # Passing datetime64-dtype data to TimedeltaIndex is no longer
- # supported GH#29794
- DataFrame([NaT]).apply(pd.to_timedelta) # TODO: belongs elsewhere?
dfn = DataFrame([NaT._value]).apply(pd.to_timedelta)
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index eb6e93b490574..e2b8ebcb79a3b 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -268,7 +268,7 @@ def test_array_copy():
),
(
[datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)],
- DatetimeArray._from_sequence(["2000", "2001"]),
+ DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
),
(
np.array([1, 2], dtype="M8[ns]"),
@@ -284,7 +284,7 @@ def test_array_copy():
(
[pd.Timestamp("2000", tz="CET"), pd.Timestamp("2001", tz="CET")],
DatetimeArray._from_sequence(
- ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET")
+ ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET", unit="ns")
),
),
(
@@ -293,7 +293,7 @@ def test_array_copy():
datetime.datetime(2001, 1, 1, tzinfo=cet),
],
DatetimeArray._from_sequence(
- ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet)
+ ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet, unit="ns")
),
),
# timedelta
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py
index 4ad4e29550d56..bcf4e8fb0e64a 100644
--- a/pandas/tests/frame/constructors/test_from_records.py
+++ b/pandas/tests/frame/constructors/test_from_records.py
@@ -442,26 +442,27 @@ def test_from_records_misc_brokenness(self):
exp = DataFrame(data, index=["a", "b", "c"])
tm.assert_frame_equal(result, exp)
+ def test_from_records_misc_brokenness2(self):
# GH#2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj
- df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
- result = df2_obj.dtypes
- expected = Series(
- [np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
+ result = DataFrame.from_records(rows, columns=["date", "test"])
+ expected = DataFrame(
+ {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}
)
- tm.assert_series_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
+ assert result.dtypes["test"] == np.dtype(object)
+ def test_from_records_misc_brokenness3(self):
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
- df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
- result = df2_obj.dtypes
- expected = Series(
- [np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
+ result = DataFrame.from_records(rows, columns=["date", "test"])
+ expected = DataFrame(
+ {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}
)
- tm.assert_series_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
def test_from_records_empty(self):
# GH#3562
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index f07c53060a06b..4b32d3de59ca2 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -809,11 +809,13 @@ def test_replace_for_new_dtypes(self, datetime_frame):
Timestamp("20130104", tz="US/Eastern"),
DataFrame(
{
- "A": [
- Timestamp("20130101", tz="US/Eastern"),
- Timestamp("20130104", tz="US/Eastern"),
- Timestamp("20130103", tz="US/Eastern"),
- ],
+ "A": pd.DatetimeIndex(
+ [
+ Timestamp("20130101", tz="US/Eastern"),
+ Timestamp("20130104", tz="US/Eastern"),
+ Timestamp("20130103", tz="US/Eastern"),
+ ]
+ ).as_unit("ns"),
"B": [0, np.nan, 2],
}
),
@@ -1174,6 +1176,7 @@ def test_replace_datetimetz(self):
"B": [0, np.nan, 2],
}
)
+ expected["A"] = expected["A"].dt.as_unit("ns")
tm.assert_frame_equal(result, expected)
result = df.copy()
@@ -1195,6 +1198,7 @@ def test_replace_datetimetz(self):
"B": [0, np.nan, 2],
}
)
+ expected["A"] = expected["A"].dt.as_unit("ns")
tm.assert_frame_equal(result, expected)
result = df.copy()
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 377128ee12ee6..c989b3d26677c 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -699,9 +699,12 @@ def test_reset_index_multiindex_nat():
df = DataFrame({"id": idx, "tstamp": tstamp, "a": list("abc")})
df.loc[2, "tstamp"] = pd.NaT
result = df.set_index(["id", "tstamp"]).reset_index("id")
+ exp_dti = pd.DatetimeIndex(
+ ["2015-07-01", "2015-07-02", "NaT"], dtype="M8[ns]", name="tstamp"
+ )
expected = DataFrame(
{"id": range(3), "a": list("abc")},
- index=pd.DatetimeIndex(["2015-07-01", "2015-07-02", "NaT"], name="tstamp"),
+ index=exp_dti,
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 4a0b192244dc8..73969457135f0 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -592,13 +592,13 @@ def test_integer_values_and_tz_interpreted_as_utc(self):
result = DatetimeIndex(values).tz_localize("US/Central")
- expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central")
+ expected = DatetimeIndex(["2000-01-01T00:00:00"], dtype="M8[ns, US/Central]")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz="UTC")
- expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="UTC")
+ expected = DatetimeIndex(["2000-01-01T00:00:00"], dtype="M8[ns, UTC]")
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 411cc90ba41a7..5275050391ca3 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1625,7 +1625,8 @@ def test_read_timezone_information(self):
result = read_json(
StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"
)
- expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
+ exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]")
+ expected = Series([88], index=exp_dti)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py
index c00a2dc92a52b..71ddff7438254 100644
--- a/pandas/tests/reshape/concat/test_datetimes.py
+++ b/pandas/tests/reshape/concat/test_datetimes.py
@@ -61,7 +61,6 @@ def test_concat_datetime_timezone(self):
dtype="M8[ns, Europe/Paris]",
freq="h",
)
-
expected = DataFrame(
[[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index f8ececf6c0540..4a852daaadf98 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -437,8 +437,10 @@ def test_pivot_no_values(self):
index=idx,
)
res = df.pivot_table(index=df.index.month, columns=Grouper(key="dt", freq="ME"))
- exp_columns = MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))])
- exp_columns.names = [None, "dt"]
+ exp_columns = MultiIndex.from_arrays(
+ [["A"], pd.DatetimeIndex(["2011-01-31"], dtype="M8[ns]")],
+ names=[None, "dt"],
+ )
exp = DataFrame(
[3.25, 2.0], index=Index([1, 2], dtype=np.int32), columns=exp_columns
)
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 0f3577a214186..fce0581260210 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -88,7 +88,8 @@ def test_setitem_with_tz(self, tz, indexer_sli):
Timestamp("2016-01-01 00:00", tz=tz),
Timestamp("2011-01-01 00:00", tz=tz),
Timestamp("2016-01-01 02:00", tz=tz),
- ]
+ ],
+ dtype=orig.dtype,
)
# scalar
@@ -100,6 +101,7 @@ def test_setitem_with_tz(self, tz, indexer_sli):
vals = Series(
[Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
+ dtype=orig.dtype,
)
assert vals.dtype == f"datetime64[ns, {tz}]"
@@ -108,7 +110,8 @@ def test_setitem_with_tz(self, tz, indexer_sli):
Timestamp("2016-01-01 00:00", tz=tz),
Timestamp("2011-01-01 00:00", tz=tz),
Timestamp("2012-01-01 00:00", tz=tz),
- ]
+ ],
+ dtype=orig.dtype,
)
ser = orig.copy()
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 8139fe52c7037..f74fe459eb4d6 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2058,7 +2058,11 @@ def test_to_datetime_unit(self, dtype):
ser = Series([epoch + t for t in range(20)]).astype(dtype)
result = to_datetime(ser, unit="s")
expected = Series(
- [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [
+ Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
+ for t in range(20)
+ ],
+ dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
@@ -2208,7 +2212,8 @@ def test_dataframe_field_aliases_column_subset(self, df, cache, unit):
# unit mappings
result = to_datetime(df[list(unit.keys())].rename(columns=unit), cache=cache)
expected = Series(
- [Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
+ [Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")],
+ dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
@@ -2970,7 +2975,8 @@ def test_to_datetime_iso8601_noleading_0s(self, cache, format):
Timestamp("2015-03-03"),
]
)
- tm.assert_series_equal(to_datetime(ser, format=format, cache=cache), expected)
+ result = to_datetime(ser, format=format, cache=cache)
+ tm.assert_series_equal(result, expected)
def test_parse_dates_infer_datetime_format_warning(self):
# GH 49024
@@ -3364,7 +3370,8 @@ def test_julian(self, julian_dates):
def test_unix(self):
result = Series(to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
- [Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
+ [Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")],
+ dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
@@ -3483,7 +3490,7 @@ def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
- expected = to_datetime([exp])
+ expected = to_datetime([exp]).as_unit("ns")
tm.assert_index_equal(result, expected)
@@ -3610,11 +3617,12 @@ def test_to_datetime_monotonic_increasing_index(cache):
)
def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length):
# GH#45319
- s = Series(
+ ser = Series(
[datetime.fromisoformat("1446-04-12 00:00:00+00:00")]
- + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length)
+ + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length),
+ dtype=object,
)
- result1 = to_datetime(s, errors="coerce", utc=True)
+ result1 = to_datetime(ser, errors="coerce", utc=True)
expected1 = Series(
[NaT] + ([Timestamp("1991-10-20 00:00:00+00:00")] * series_length)
@@ -3622,7 +3630,7 @@ def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length):
tm.assert_series_equal(result1, expected1)
- result2 = to_datetime(s, errors="ignore", utc=True)
+ result2 = to_datetime(ser, errors="ignore", utc=True)
expected2 = Series(
[datetime.fromisoformat("1446-04-12 00:00:00+00:00")]
@@ -3632,7 +3640,7 @@ def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length):
tm.assert_series_equal(result2, expected2)
with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp"):
- to_datetime(s, errors="raise", utc=True)
+ to_datetime(ser, errors="raise", utc=True)
def test_to_datetime_format_f_parse_nanos():
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index e588bc83b0de8..b3d4d9d67190f 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -21,6 +21,17 @@
class TestTimedeltas:
+ def test_to_timedelta_dt64_raises(self):
+ # Passing datetime64-dtype data to TimedeltaIndex is no longer
+ # supported GH#29794
+ msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
+
+ ser = Series([pd.NaT])
+ with pytest.raises(TypeError, match=msg):
+ to_timedelta(ser)
+ with pytest.raises(TypeError, match=msg):
+ ser.to_frame().apply(to_timedelta)
+
@pytest.mark.parametrize("readonly", [True, False])
def test_to_timedelta_readonly(self, readonly):
# GH#34857
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
aimed at trimming the diff in #55901 | https://api.github.com/repos/pandas-dev/pandas/pulls/56239 | 2023-11-29T16:51:01Z | 2023-11-29T17:50:27Z | 2023-11-29T17:50:27Z | 2023-11-30T01:31:26Z |
BUG: Read CSV on python engine fails with callable skiprows and chunk size specified (#55677) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index efa4a52993a90..08fa594632259 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -407,6 +407,7 @@ MultiIndex
I/O
^^^
+- Bug in :func:`read_csv` where ``engine="python"`` was causing a ``TypeError`` when a callable skiprows and a chunk size was specified. (:issue:`55677`)
- Bug in :func:`read_csv` where ``on_bad_lines="warn"`` would write to ``stderr`` instead of raise a Python warning. This now yields a :class:`.errors.ParserWarning` (:issue:`54296`)
- Bug in :func:`read_csv` with ``engine="pyarrow"`` where ``usecols`` wasn't working with a csv with no headers (:issue:`54459`)
- Bug in :func:`read_excel`, with ``engine="xlrd"`` (``xls`` files) erroring when file contains NaNs/Infs (:issue:`54564`)
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index fae3293414b02..742d9f6defc76 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -1117,18 +1117,15 @@ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
new_rows = []
try:
if rows is not None:
- rows_to_skip = 0
- if self.skiprows is not None and self.pos is not None:
- # Only read additional rows if pos is in skiprows
- rows_to_skip = len(
- set(self.skiprows) - set(range(self.pos))
- )
-
- for _ in range(rows + rows_to_skip):
- # assert for mypy, data is Iterator[str] or None, would
- # error in next
- assert self.data is not None
- new_rows.append(next(self.data))
+ row_index = 0
+ row_ct = 0
+ offset = self.pos if self.pos is not None else 0
+ while row_ct < rows:
+ new_row = next(self.data)
+ if not self.skipfunc(offset + row_index):
+ row_ct += 1
+ row_index += 1
+ new_rows.append(new_row)
len_new_rows = len(new_rows)
new_rows = self._remove_skipped_rows(new_rows)
diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py
index 9146af3f969e6..6ef66d2567fe1 100644
--- a/pandas/tests/io/parser/test_skiprows.py
+++ b/pandas/tests/io/parser/test_skiprows.py
@@ -99,11 +99,11 @@ def test_skip_rows_blank(all_parsers):
[
(
"""id,text,num_lines
-1,"line 11
-line 12",2
-2,"line 21
-line 22",2
-3,"line 31",1""",
+ 1,"line 11
+ line 12",2
+ 2,"line 21
+ line 22",2
+ 3,"line 31",1""",
{"skiprows": [1]},
DataFrame(
[[2, "line 21\nline 22", 2], [3, "line 31", 1]],
@@ -156,23 +156,23 @@ def test_skip_row_with_quote(all_parsers):
[
(
"""id,text,num_lines
-1,"line \n'11' line 12",2
-2,"line \n'21' line 22",2
-3,"line \n'31' line 32",1""",
+ 1,"line \n'11' line 12",2
+ 2,"line \n'21' line 22",2
+ 3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]],
),
(
"""id,text,num_lines
-1,"line '11\n' line 12",2
-2,"line '21\n' line 22",2
-3,"line '31\n' line 32",1""",
+ 1,"line '11\n' line 12",2
+ 2,"line '21\n' line 22",2
+ 3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]],
),
(
"""id,text,num_lines
-1,"line '11\n' \r\tline 12",2
-2,"line '21\n' \r\tline 22",2
-3,"line '31\n' \r\tline 32",1""",
+ 1,"line '11\n' \r\tline 12",2
+ 2,"line '21\n' \r\tline 22",2
+ 3,"line '31\n' \r\tline 32",1""",
[[2, "line '21\n' \r\tline 22", 2], [3, "line '31\n' \r\tline 32", 1]],
),
],
@@ -301,3 +301,31 @@ def test_skip_rows_and_n_rows(all_parsers):
result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6])
expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]})
tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow
+def test_skip_rows_with_chunks(all_parsers):
+ # GH 55677
+ data = """col_a
+10
+20
+30
+40
+50
+60
+70
+80
+90
+100
+"""
+ parser = all_parsers
+ reader = parser.read_csv(
+ StringIO(data), engine=parser, skiprows=lambda x: x in [1, 4, 5], chunksize=4
+ )
+ df1 = next(reader)
+ df2 = next(reader)
+
+ tm.assert_frame_equal(
+ df1, DataFrame({"col_a": [20, 30, 60, 70]}, index=[0, 1, 2, 3])
+ )
+ tm.assert_frame_equal(df2, DataFrame({"col_a": [80, 90, 100]}, index=[4, 5, 6]))
| -Added support for the python parser to handle using skiprows and chunk_size options at the same time to ensure API contract is met.
-Added a regression test to ensure this bug can be quickly caught in the future if it reappears.
- [✅ ] closes #55677 (Replace xxxx with the GitHub issue number)
- [✅] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [✅] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [✅] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [✅] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56238 | 2023-11-29T16:16:11Z | 2023-11-30T02:08:19Z | null | 2023-11-30T02:08:24Z |
DEPR: Default of observed=False in DataFrame.pivot_table | diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index 34d04745ccdb5..8fb991dca02db 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -647,7 +647,7 @@ Pivot tables:
raw_cat = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b", "c"])
df = pd.DataFrame({"A": raw_cat, "B": ["c", "d", "c", "d"], "values": [1, 2, 3, 4]})
- pd.pivot_table(df, values="values", index=["A", "B"])
+ pd.pivot_table(df, values="values", index=["A", "B"], observed=False)
Data munging
------------
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index cdffc6968a170..808741ccf4475 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -286,12 +286,33 @@ For pivoting operations, this behavior is *already* controlled by the ``dropna``
df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df
-.. ipython:: python
- pd.pivot_table(df, values='values', index=['A', 'B'],
- dropna=True)
- pd.pivot_table(df, values='values', index=['A', 'B'],
- dropna=False)
+.. code-block:: ipython
+
+ In [1]: pd.pivot_table(df, values='values', index=['A', 'B'], dropna=True)
+
+ Out[1]:
+ values
+ A B
+ a c 1.0
+ d 2.0
+ b c 3.0
+ d 4.0
+
+ In [2]: pd.pivot_table(df, values='values', index=['A', 'B'], dropna=False)
+
+ Out[2]:
+ values
+ A B
+ a c 1.0
+ d 2.0
+ y NaN
+ b c 3.0
+ d 4.0
+ y NaN
+ z c NaN
+ d NaN
+ y NaN
.. _whatsnew_0230.enhancements.window_raw:
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index ade87c4215a38..c8fddd8ac49c2 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -434,6 +434,7 @@ Other Deprecations
- Deprecated the ``ordinal`` keyword in :class:`PeriodIndex`, use :meth:`PeriodIndex.from_ordinals` instead (:issue:`55960`)
- Deprecated the ``unit`` keyword in :class:`TimedeltaIndex` construction, use :func:`to_timedelta` instead (:issue:`55499`)
- Deprecated the behavior of :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype; in a future version these will not perform dtype inference on the resulting :class:`Index`, do ``result.index = result.index.infer_objects()`` to retain the old behavior (:issue:`56161`)
+- Deprecated the default of ``observed=False`` in :meth:`DataFrame.pivot_table`; will be ``True`` in a future version (:issue:`56236`)
- Deprecated the extension test classes ``BaseNoReduceTests``, ``BaseBooleanReduceTests``, and ``BaseNumericReduceTests``, use ``BaseReduceTests`` instead (:issue:`54663`)
- Deprecated the option ``mode.data_manager`` and the ``ArrayManager``; only the ``BlockManager`` will be available in future versions (:issue:`55043`)
- Deprecated the previous implementation of :class:`DataFrame.stack`; specify ``future_stack=True`` to adopt the future version (:issue:`53515`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 008c1e0d10ba4..c0c91d808f31d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9297,6 +9297,11 @@ def pivot(
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
+ .. deprecated:: 2.2.0
+
+ The default value of ``False`` is deprecated and will change to
+ ``True`` in a future version of pandas.
+
sort : bool, default True
Specifies if the result should be sorted.
@@ -9407,7 +9412,7 @@ def pivot_table(
margins: bool = False,
dropna: bool = True,
margins_name: Level = "All",
- observed: bool = False,
+ observed: bool | lib.NoDefault = lib.no_default,
sort: bool = True,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index eba4f72b5eb8f..82718d4c43a65 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -10,6 +10,7 @@
Literal,
cast,
)
+import warnings
import numpy as np
@@ -18,6 +19,7 @@
Appender,
Substitution,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
@@ -68,7 +70,7 @@ def pivot_table(
margins: bool = False,
dropna: bool = True,
margins_name: Hashable = "All",
- observed: bool = False,
+ observed: bool | lib.NoDefault = lib.no_default,
sort: bool = True,
) -> DataFrame:
index = _convert_by(index)
@@ -123,7 +125,7 @@ def __internal_pivot_table(
margins: bool,
dropna: bool,
margins_name: Hashable,
- observed: bool,
+ observed: bool | lib.NoDefault,
sort: bool,
) -> DataFrame:
"""
@@ -166,7 +168,18 @@ def __internal_pivot_table(
pass
values = list(values)
- grouped = data.groupby(keys, observed=observed, sort=sort, dropna=dropna)
+ observed_bool = False if observed is lib.no_default else observed
+ grouped = data.groupby(keys, observed=observed_bool, sort=sort, dropna=dropna)
+ if observed is lib.no_default and any(
+ ping._passed_categorical for ping in grouped.grouper.groupings
+ ):
+ warnings.warn(
+ "The default value of observed=False is deprecated and will change "
+ "to observed=True in a future version of pandas. Specify "
+ "observed=False to silence this warning and retain the current behavior",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
@@ -719,6 +732,7 @@ def crosstab(
margins=margins,
margins_name=margins_name,
dropna=dropna,
+ observed=False,
**kwargs, # type: ignore[arg-type]
)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 4a852daaadf98..dab2b034d3fd4 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -201,7 +201,9 @@ def test_pivot_table_categorical(self):
["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True
)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
- result = pivot_table(df, values="values", index=["A", "B"], dropna=True)
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = pivot_table(df, values="values", index=["A", "B"], dropna=True)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1.0, 2.0, 3.0, 4.0]}, index=exp_index)
@@ -220,7 +222,9 @@ def test_pivot_table_dropna_categoricals(self, dropna):
)
df["A"] = df["A"].astype(CategoricalDtype(categories, ordered=False))
- result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna)
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna)
expected_columns = Series(["a", "b", "c"], name="A")
expected_columns = expected_columns.astype(
CategoricalDtype(categories, ordered=False)
@@ -250,7 +254,9 @@ def test_pivot_with_non_observable_dropna(self, dropna):
}
)
- result = df.pivot_table(index="A", values="B", dropna=dropna)
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(index="A", values="B", dropna=dropna)
if dropna:
values = [2.0, 3.0]
codes = [0, 1]
@@ -283,7 +289,9 @@ def test_pivot_with_non_observable_dropna_multi_cat(self, dropna):
}
)
- result = df.pivot_table(index="A", values="B", dropna=dropna)
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(index="A", values="B", dropna=dropna)
expected = DataFrame(
{"B": [2.0, 3.0, 0.0]},
index=Index(
@@ -301,7 +309,10 @@ def test_pivot_with_non_observable_dropna_multi_cat(self, dropna):
def test_pivot_with_interval_index(self, interval_values, dropna):
# GH 25814
df = DataFrame({"A": interval_values, "B": 1})
- result = df.pivot_table(index="A", values="B", dropna=dropna)
+
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(index="A", values="B", dropna=dropna)
expected = DataFrame(
{"B": 1.0}, index=Index(interval_values.unique(), name="A")
)
@@ -322,9 +333,11 @@ def test_pivot_with_interval_index_margins(self):
}
)
- pivot_tab = pivot_table(
- df, index="C", columns="B", values="A", aggfunc="sum", margins=True
- )
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ pivot_tab = pivot_table(
+ df, index="C", columns="B", values="A", aggfunc="sum", margins=True
+ )
result = pivot_tab["All"]
expected = Series(
@@ -1827,7 +1840,9 @@ def test_categorical_margins_category(self, observed):
df.y = df.y.astype("category")
df.z = df.z.astype("category")
- table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_margins_casted_to_float(self):
@@ -1889,9 +1904,11 @@ def test_categorical_aggfunc(self, observed):
{"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]}
)
df["C1"] = df["C1"].astype("category")
- result = df.pivot_table(
- "V", index="C1", columns="C2", dropna=observed, aggfunc="count"
- )
+ msg = "The default value of observed=False is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(
+ "V", index="C1", columns="C2", dropna=observed, aggfunc="count"
+ )
expected_index = pd.CategoricalIndex(
["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1"
| - [x] closes #56236 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56237 | 2023-11-29T04:17:59Z | 2023-12-04T18:27:36Z | 2023-12-04T18:27:35Z | 2023-12-04T23:38:37Z |
Table cell styling | diff --git a/pandas/io/html.py b/pandas/io/html.py
index 5d5bf079784be..20a7e9972867b 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -238,7 +238,7 @@ def __init__(
self.extract_links = extract_links
self.storage_options = storage_options
- def parse_tables(self):
+ def parse_tables(self, td_converter: Callable | None = None):
"""
Parse and return all tables from the DOM.
@@ -247,7 +247,9 @@ def parse_tables(self):
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
- return (self._parse_thead_tbody_tfoot(table) for table in tables)
+ return (
+ self._parse_thead_tbody_tfoot(table, td_converter)
+ for table in tables)
def _attr_getter(self, obj, attr):
"""
@@ -422,13 +424,14 @@ def _build_doc(self):
"""
raise AbstractMethodError(self)
- def _parse_thead_tbody_tfoot(self, table_html):
+ def _parse_thead_tbody_tfoot(self, table_html, td_converter: Callable):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
+ td_converter: Callable
Returns
-------
@@ -461,15 +464,14 @@ def row_is_all_th(row):
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
- header = self._expand_colspan_rowspan(header_rows, section="header")
- body = self._expand_colspan_rowspan(body_rows, section="body")
- footer = self._expand_colspan_rowspan(footer_rows, section="footer")
+ header = self._expand_colspan_rowspan(header_rows, section="header", td_converter=td_converter)
+ body = self._expand_colspan_rowspan(body_rows, section="body", td_converter=td_converter)
+ footer = self._expand_colspan_rowspan(footer_rows, section="footer", td_converter=td_converter)
return header, body, footer
def _expand_colspan_rowspan(
- self, rows, section: Literal["header", "footer", "body"]
- ):
+ self, rows, section: Literal["header", "footer", "body"], *, td_converter: Callable | None = None):
"""
Given a list of <tr>s, return a list of text rows.
@@ -502,7 +504,7 @@ def _expand_colspan_rowspan(
index = 0
tds = self._parse_td(tr)
- for td in tds:
+ for td in tds:
# Append texts from previous rows with rowspan>1 that come
# before this <td>
while remainder and remainder[0][0] <= index:
@@ -521,6 +523,7 @@ def _expand_colspan_rowspan(
colspan = int(self._attr_getter(td, "colspan") or 1)
for _ in range(colspan):
+ text = td_converter(td, text, section) if td_converter else text
texts.append(text)
if rowspan > 1:
next_remainder.append((index, text, rowspan - 1))
@@ -964,6 +967,7 @@ def _parse(
match,
attrs,
encoding,
+ td_converter,
displayed_only,
extract_links,
storage_options,
@@ -986,7 +990,7 @@ def _parse(
)
try:
- tables = p.parse_tables()
+ tables = p.parse_tables(td_converter)
except ValueError as caught:
# if `io` is an io-like object, check if it's seekable
# and try to rewind it before trying the next parser
@@ -1044,6 +1048,7 @@ def read_html(
encoding: str | None = None,
decimal: str = ".",
converters: dict | None = None,
+ td_converter: Callable | None = None,
na_values: Iterable[object] | None = None,
keep_default_na: bool = True,
displayed_only: bool = True,
@@ -1138,6 +1143,10 @@ def read_html(
input argument, the cell (not column) content, and return the
transformed content.
+ td_converter: Callable, default None
+ A python function that converts text based on <td> content. For example,
+ CSS content.
+
na_values : iterable, default None
Custom NA values.
@@ -1256,6 +1265,7 @@ def read_html(
encoding=encoding,
decimal=decimal,
converters=converters,
+ td_converter=td_converter,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index dcee52011a691..3588a4713a993 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -1653,3 +1653,4 @@ def test_style_tag(self, flavor_read_html):
result = flavor_read_html(StringIO(data))[0]
expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
+
\ No newline at end of file
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56235 | 2023-11-29T03:01:38Z | 2023-12-27T19:16:55Z | null | 2023-12-27T19:16:55Z |
DOC: Fix typo in copy_on_write docs | diff --git a/doc/source/user_guide/copy_on_write.rst b/doc/source/user_guide/copy_on_write.rst
index 9dbc46ca2db0e..fb0da70a0ea07 100644
--- a/doc/source/user_guide/copy_on_write.rst
+++ b/doc/source/user_guide/copy_on_write.rst
@@ -26,7 +26,7 @@ Previous behavior
-----------------
pandas indexing behavior is tricky to understand. Some operations return views while
-other return copies. Depending on the result of the operation, mutation one object
+other return copies. Depending on the result of the operation, mutating one object
might accidentally mutate another:
.. ipython:: python
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56234 | 2023-11-29T01:47:11Z | 2023-11-29T17:51:27Z | 2023-11-29T17:51:27Z | 2023-11-29T17:51:34Z |
DOC: Update release instructions | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 29cc256f35a4e..c7803d8401e4e 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -449,9 +449,13 @@ which will be triggered when the tag is pushed.
git tag -a v1.5.0.dev0 -m "DEV: Start 1.5.0"
git push upstream main --follow-tags
-3. Build the source distribution (git must be in the tag commit)::
+3. Download the source distribution and wheels from the `wheel staging area <https://anaconda.org/scientific-python-nightly-wheels/pandas>`_.
+ Be careful to make sure that no wheels are missing (e.g. due to failed builds).
- ./setup.py sdist --formats=gztar --quiet
+ Running scripts/download_wheels.sh with the version that you want to download wheels/the sdist for should do the trick.
+ This script will make a ``dist`` folder inside your clone of pandas and put the downloaded wheels and sdist there::
+
+ scripts/download_wheels.sh <VERSION>
4. Create a `new GitHub release <https://github.com/pandas-dev/pandas/releases/new>`_:
@@ -463,23 +467,19 @@ which will be triggered when the tag is pushed.
- Set as the latest release: Leave checked, unless releasing a patch release for an older version
(e.g. releasing 1.4.5 after 1.5 has been released)
-5. The GitHub release will after some hours trigger an
+5. Upload wheels to PyPI::
+
+ twine upload pandas/dist/pandas-<version>*.{whl,tar.gz} --skip-existing
+
+6. The GitHub release will after some hours trigger an
`automated conda-forge PR <https://github.com/conda-forge/pandas-feedstock/pulls>`_.
+ (If you don't want to wait, you can open an issue titled ``@conda-forge-admin, please update version`` to trigger the bot.)
Merge it once the CI is green, and it will generate the conda-forge packages.
+
In case a manual PR needs to be done, the version, sha256 and build fields are the
ones that usually need to be changed. If anything else in the recipe has changed since
the last release, those changes should be available in ``ci/meta.yaml``.
-6. Packages for supported versions in PyPI are built automatically from our CI.
- Once all packages are build download all wheels from the
- `Anaconda repository <https://anaconda.org/multibuild-wheels-staging/pandas/files?version=\<version\>>`_
- where our CI published them to the ``dist/`` directory in your local pandas copy.
- You can use the script ``scripts/download_wheels.sh`` to download all wheels at once.
-
-7. Upload wheels to PyPI::
-
- twine upload pandas/dist/pandas-<version>*.{whl,tar.gz} --skip-existing
-
Post-Release
````````````
diff --git a/scripts/download_wheels.sh b/scripts/download_wheels.sh
index 0b92e83113f5f..84279ac7a04d1 100755
--- a/scripts/download_wheels.sh
+++ b/scripts/download_wheels.sh
@@ -11,6 +11,7 @@
# one by one to the dist/ directory where they would be generated.
VERSION=$1
+mkdir -p $(dirname -- $0)/../dist
DIST_DIR="$(realpath $(dirname -- $0)/../dist)"
if [ -z $VERSION ]; then
@@ -20,7 +21,7 @@ fi
curl "https://anaconda.org/multibuild-wheels-staging/pandas/files?version=${VERSION}" | \
grep "href=\"/multibuild-wheels-staging/pandas/${VERSION}" | \
- sed -r 's/.*<a href="([^"]+\.whl)">.*/\1/g' | \
+ sed -r 's/.*<a href="([^"]+\.(whl|tar.gz))">.*/\1/g' | \
awk '{print "https://anaconda.org" $0 }' | \
xargs wget -P $DIST_DIR
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. | https://api.github.com/repos/pandas-dev/pandas/pulls/56232 | 2023-11-28T22:57:04Z | 2023-11-29T17:43:03Z | 2023-11-29T17:43:03Z | 2023-12-18T16:26:09Z |
CoW warning mode: enable chained assignment warning for DataFrame setitem in default mode | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 008c1e0d10ba4..847f514451add 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4230,15 +4230,14 @@ def __setitem__(self, key, value) -> None:
warnings.warn(
_chained_assignment_msg, ChainedAssignmentError, stacklevel=2
)
- # elif not PYPY and not using_copy_on_write():
- elif not PYPY and warn_copy_on_write():
- if sys.getrefcount(self) <= 3: # and (
- # warn_copy_on_write()
- # or (
- # not warn_copy_on_write()
- # and self._mgr.blocks[0].refs.has_reference()
- # )
- # ):
+ elif not PYPY and not using_copy_on_write():
+ if sys.getrefcount(self) <= 3 and (
+ warn_copy_on_write()
+ or (
+ not warn_copy_on_write()
+ and any(b.refs.has_reference() for b in self._mgr.blocks) # type: ignore[union-attr]
+ )
+ ):
warnings.warn(
_chained_assignment_warning_msg, FutureWarning, stacklevel=2
)
diff --git a/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
index 7b08d9b80fc9b..32d0f55f67185 100644
--- a/pandas/tests/copy_view/test_chained_assignment_deprecation.py
+++ b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
@@ -1,9 +1,15 @@
import numpy as np
import pytest
-from pandas.errors import ChainedAssignmentError
+from pandas.errors import (
+ ChainedAssignmentError,
+ SettingWithCopyWarning,
+)
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ option_context,
+)
import pandas._testing as tm
@@ -85,3 +91,17 @@ def test_series_setitem(indexer, using_copy_on_write):
else:
assert record[0].category == FutureWarning
assert "ChainedAssignmentError" in record[0].message.args[0]
+
+
+@pytest.mark.filterwarnings("ignore::pandas.errors.SettingWithCopyWarning")
+@pytest.mark.parametrize(
+ "indexer", ["a", ["a", "b"], slice(0, 2), np.array([True, False, True])]
+)
+def test_frame_setitem(indexer, using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3, 4, 5], "b": 1})
+
+ extra_warnings = () if using_copy_on_write else (SettingWithCopyWarning,)
+
+ with option_context("chained_assignment", "warn"):
+ with tm.raises_chained_assignment_error(extra_warnings=extra_warnings):
+ df[0:3][indexer] = 10
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index e613f1102b03b..53ad4d6b41687 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -548,7 +548,8 @@ def test_frame_setitem_copy_raises(
else:
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(SettingWithCopyError, match=msg):
- df["foo"]["one"] = 2
+ with tm.raises_chained_assignment_error():
+ df["foo"]["one"] = 2
def test_frame_setitem_copy_no_write(
@@ -563,7 +564,8 @@ def test_frame_setitem_copy_no_write(
else:
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(SettingWithCopyError, match=msg):
- df["foo"]["one"] = 2
+ with tm.raises_chained_assignment_error():
+ df["foo"]["one"] = 2
result = df
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 230c575e6ed10..88cac9b16f8f7 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -452,7 +452,8 @@ def test_detect_chained_assignment_undefined_column(
df.iloc[0:5]["group"] = "a"
else:
with pytest.raises(SettingWithCopyError, match=msg):
- df.iloc[0:5]["group"] = "a"
+ with tm.raises_chained_assignment_error():
+ df.iloc[0:5]["group"] = "a"
@pytest.mark.arm_slow
def test_detect_chained_assignment_changing_dtype(
| Follow-up on https://github.com/pandas-dev/pandas/pull/55522#discussion_r1406831714
This ensures the chained assignment warning is shown in the default mode as well.
This does mean you currently get some noise cases of both this new warning and the existing SettingWithCopyWarning. But either this SettingWithCopy was a false positive and it was setting on a view (eg a far-fetched case like `df[0:3][0:2] = 10`), and then we really need to have the new warning as well because it will change behaviour. Or either it was a correct warning (e.g. `df[mask][other_indexer] = ..`, and in that case this has never been working, and adding an additional warning for a case that never works does no harm (and so a user can only have by accident, or potentially unaware it doesn't do what they intend).
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56230 | 2023-11-28T19:15:08Z | 2023-12-04T08:29:32Z | 2023-12-04T08:29:32Z | 2023-12-04T08:29:50Z |
TST/CLN: Remove makeFloat/Period/Int/NumericIndex | diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index d70ad144a3455..5e23cba2e1074 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -6,12 +6,12 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
Series,
Timestamp,
date_range,
to_timedelta,
)
-import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
@@ -323,8 +323,10 @@ class IndexArithmetic:
def setup(self, dtype):
N = 10**6
- indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
- self.index = getattr(tm, indexes[dtype])(N)
+ if dtype == "float":
+ self.index = Index(np.arange(N), dtype=np.float64)
+ elif dtype == "int":
+ self.index = Index(np.arange(N), dtype=np.int64)
def time_add(self, dtype):
self.index + 2
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index b58a8f706e5a6..b1918e1b1d7c2 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -27,12 +27,8 @@
from pandas.compat import pa_version_under10p1
from pandas.core.dtypes.common import (
- is_float_dtype,
is_sequence,
- is_signed_integer_dtype,
is_string_dtype,
- is_unsigned_integer_dtype,
- pandas_dtype,
)
import pandas as pd
@@ -46,6 +42,8 @@
RangeIndex,
Series,
bdate_range,
+ date_range,
+ period_range,
timedelta_range,
)
from pandas._testing._io import (
@@ -111,7 +109,6 @@
NpDtype,
)
- from pandas import PeriodIndex
from pandas.core.arrays import ArrowExtensionArray
_N = 30
@@ -351,38 +348,6 @@ def getCols(k) -> str:
return string.ascii_uppercase[:k]
-def makeNumericIndex(k: int = 10, *, name=None, dtype: Dtype | None) -> Index:
- dtype = pandas_dtype(dtype)
- assert isinstance(dtype, np.dtype)
-
- if dtype.kind in "iu":
- values = np.arange(k, dtype=dtype)
- if is_unsigned_integer_dtype(dtype):
- values += 2 ** (dtype.itemsize * 8 - 1)
- elif dtype.kind == "f":
- values = np.random.default_rng(2).random(k) - np.random.default_rng(2).random(1)
- values.sort()
- values = values * (10 ** np.random.default_rng(2).integers(0, 9))
- else:
- raise NotImplementedError(f"wrong dtype {dtype}")
-
- return Index(values, dtype=dtype, name=name)
-
-
-def makeIntIndex(k: int = 10, *, name=None, dtype: Dtype = "int64") -> Index:
- dtype = pandas_dtype(dtype)
- if not is_signed_integer_dtype(dtype):
- raise TypeError(f"Wrong dtype {dtype}")
- return makeNumericIndex(k, name=name, dtype=dtype)
-
-
-def makeFloatIndex(k: int = 10, *, name=None, dtype: Dtype = "float64") -> Index:
- dtype = pandas_dtype(dtype)
- if not is_float_dtype(dtype):
- raise TypeError(f"Wrong dtype {dtype}")
- return makeNumericIndex(k, name=name, dtype=dtype)
-
-
def makeDateIndex(
k: int = 10, freq: Frequency = "B", name=None, **kwargs
) -> DatetimeIndex:
@@ -391,12 +356,6 @@ def makeDateIndex(
return DatetimeIndex(dr, name=name, **kwargs)
-def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
- dt = datetime(2000, 1, 1)
- pi = pd.period_range(start=dt, periods=k, freq="D", name=name, **kwargs)
- return pi
-
-
def makeObjectSeries(name=None) -> Series:
data = [f"foo_{i}" for i in range(_N)]
index = Index([f"bar_{i}" for i in range(_N)])
@@ -487,12 +446,12 @@ def makeCustomIndex(
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
- "i": makeIntIndex,
- "f": makeFloatIndex,
+ "i": lambda n: Index(np.arange(n), dtype=np.int64),
+ "f": lambda n: Index(np.arange(n), dtype=np.float64),
"s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
- "dt": makeDateIndex,
+ "dt": lambda n: date_range("2020-01-01", periods=n),
"td": lambda n: timedelta_range("1 day", periods=n),
- "p": makePeriodIndex,
+ "p": lambda n: period_range("2020-01-01", periods=n, freq="D"),
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
@@ -975,11 +934,7 @@ def shares_memory(left, right) -> bool:
"makeCustomIndex",
"makeDataFrame",
"makeDateIndex",
- "makeFloatIndex",
- "makeIntIndex",
- "makeNumericIndex",
"makeObjectSeries",
- "makePeriodIndex",
"makeTimeDataFrame",
"makeTimeSeries",
"maybe_produces_warning",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 1dae6d0043b61..1bc067eb32aef 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -68,6 +68,7 @@
Series,
Timedelta,
Timestamp,
+ period_range,
timedelta_range,
)
import pandas._testing as tm
@@ -616,23 +617,27 @@ def _create_mi_with_dt64tz_level():
"string": Index([f"pandas_{i}" for i in range(100)]),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
- "period": tm.makePeriodIndex(100),
+ "period": period_range("2020-01-01", periods=100, freq="D"),
"timedelta": timedelta_range(start="1 day", periods=100, freq="D"),
"range": RangeIndex(100),
- "int8": tm.makeIntIndex(100, dtype="int8"),
- "int16": tm.makeIntIndex(100, dtype="int16"),
- "int32": tm.makeIntIndex(100, dtype="int32"),
- "int64": tm.makeIntIndex(100, dtype="int64"),
+ "int8": Index(np.arange(100), dtype="int8"),
+ "int16": Index(np.arange(100), dtype="int16"),
+ "int32": Index(np.arange(100), dtype="int32"),
+ "int64": Index(np.arange(100), dtype="int64"),
"uint8": Index(np.arange(100), dtype="uint8"),
"uint16": Index(np.arange(100), dtype="uint16"),
"uint32": Index(np.arange(100), dtype="uint32"),
"uint64": Index(np.arange(100), dtype="uint64"),
- "float32": tm.makeFloatIndex(100, dtype="float32"),
- "float64": tm.makeFloatIndex(100, dtype="float64"),
+ "float32": Index(np.arange(100), dtype="float32"),
+ "float64": Index(np.arange(100), dtype="float64"),
"bool-object": Index([True, False] * 5, dtype=object),
"bool-dtype": Index(np.random.default_rng(2).standard_normal(10) < 0),
- "complex64": tm.makeNumericIndex(100, dtype="float64").astype("complex64"),
- "complex128": tm.makeNumericIndex(100, dtype="float64").astype("complex128"),
+ "complex64": Index(
+ np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64")
+ ),
+ "complex128": Index(
+ np.arange(100, dtype="complex128") + 1.0j * np.arange(100, dtype="complex128")
+ ),
"categorical": CategoricalIndex(list("abcd") * 25),
"interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)),
"empty": Index([]),
diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py
index a5e8b2ed1efe5..be4077d921a9e 100644
--- a/pandas/tests/extension/test_masked.py
+++ b/pandas/tests/extension/test_masked.py
@@ -24,6 +24,12 @@
)
from pandas.compat.numpy import np_version_gt2
+from pandas.core.dtypes.common import (
+ is_float_dtype,
+ is_signed_integer_dtype,
+ is_unsigned_integer_dtype,
+)
+
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.boolean import BooleanDtype
@@ -281,7 +287,7 @@ def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
tm.assert_almost_equal(result, expected)
def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
- if tm.is_float_dtype(arr.dtype):
+ if is_float_dtype(arr.dtype):
cmp_dtype = arr.dtype.name
elif op_name in ["mean", "median", "var", "std", "skew"]:
cmp_dtype = "Float64"
@@ -289,7 +295,7 @@ def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
cmp_dtype = arr.dtype.name
elif arr.dtype in ["Int64", "UInt64"]:
cmp_dtype = arr.dtype.name
- elif tm.is_signed_integer_dtype(arr.dtype):
+ elif is_signed_integer_dtype(arr.dtype):
# TODO: Why does Window Numpy 2.0 dtype depend on skipna?
cmp_dtype = (
"Int32"
@@ -297,7 +303,7 @@ def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
or not IS64
else "Int64"
)
- elif tm.is_unsigned_integer_dtype(arr.dtype):
+ elif is_unsigned_integer_dtype(arr.dtype):
cmp_dtype = (
"UInt32"
if (is_platform_windows() and (not np_version_gt2 or not skipna))
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 078a0e06e0ed7..778c07b46e57c 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
@@ -330,7 +331,7 @@ def get_kwargs_from_breaks(self, breaks, closed="right"):
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_tuples
"""
- if tm.is_unsigned_integer_dtype(breaks):
+ if is_unsigned_integer_dtype(breaks):
pytest.skip(f"{breaks.dtype} not relevant IntervalIndex.from_tuples tests")
if len(breaks) == 0:
@@ -388,7 +389,7 @@ def get_kwargs_from_breaks(self, breaks, closed="right"):
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by the IntervalIndex/Index constructors
"""
- if tm.is_unsigned_integer_dtype(breaks):
+ if is_unsigned_integer_dtype(breaks):
pytest.skip(f"{breaks.dtype} not relevant for class constructor tests")
if len(breaks) == 0:
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 662f31cc3560e..3db81c0285bd2 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -507,8 +507,8 @@ def test_map_with_tuples(self):
# Test that returning a single tuple from an Index
# returns an Index.
- index = tm.makeIntIndex(3)
- result = tm.makeIntIndex(3).map(lambda x: (x,))
+ index = Index(np.arange(3), dtype=np.int64)
+ result = index.map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
@@ -555,7 +555,7 @@ def test_map_tseries_indices_accsr_return_index(self):
def test_map_dictlike_simple(self, mapper):
# GH 12756
expected = Index(["foo", "bar", "baz"])
- index = tm.makeIntIndex(3)
+ index = Index(np.arange(3), dtype=np.int64)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/io/pytables/test_time_series.py b/pandas/tests/io/pytables/test_time_series.py
index cfe25f03e1aab..4afcf5600dce6 100644
--- a/pandas/tests/io/pytables/test_time_series.py
+++ b/pandas/tests/io/pytables/test_time_series.py
@@ -8,6 +8,7 @@
DatetimeIndex,
Series,
_testing as tm,
+ period_range,
)
from pandas.tests.io.pytables.common import ensure_clean_store
@@ -36,7 +37,7 @@ def test_tseries_indices_series(setup_path):
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
- idx = tm.makePeriodIndex(10)
+ idx = period_range("2020-01-01", periods=10, freq="D")
ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
store["a"] = ser
result = store["a"]
@@ -60,7 +61,7 @@ def test_tseries_indices_frame(setup_path):
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
- idx = tm.makePeriodIndex(10)
+ idx = period_range("2020-01-01", periods=10, freq="D")
df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx)
store["a"] = df
result = store["a"]
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 303f8550c5a80..ccfa3be702dae 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -23,6 +23,7 @@
Timestamp,
date_range,
isna,
+ period_range,
timedelta_range,
to_timedelta,
)
@@ -34,11 +35,13 @@
def get_objs():
indexes = [
Index([True, False] * 5, name="a"),
- tm.makeIntIndex(10, name="a"),
- tm.makeFloatIndex(10, name="a"),
- tm.makeDateIndex(10, name="a"),
- tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
- tm.makePeriodIndex(10, name="a"),
+ Index(np.arange(10), dtype=np.int64, name="a"),
+ Index(np.arange(10), dtype=np.float64, name="a"),
+ DatetimeIndex(date_range("2020-01-01", periods=10), name="a"),
+ DatetimeIndex(date_range("2020-01-01", periods=10), name="a").tz_localize(
+ tz="US/Eastern"
+ ),
+ PeriodIndex(period_range("2020-01-01", periods=10, freq="D"), name="a"),
Index([str(i) for i in range(10)], name="a"),
]
@@ -534,7 +537,7 @@ def test_minmax_period_empty_nat(self, op, data):
assert result is NaT
def test_numpy_minmax_period(self):
- pr = pd.period_range(start="2016-01-15", end="2016-01-20")
+ pr = period_range(start="2016-01-15", end="2016-01-20")
assert np.min(pr) == Period("2016-01-15", freq="D")
assert np.max(pr) == Period("2016-01-20", freq="D")
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 0f3577a214186..cac7bd6de9d3b 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -237,7 +237,9 @@ def test_setitem_slice_integers(self):
def test_setitem_slicestep(self):
# caught this bug when writing tests
- series = Series(tm.makeIntIndex(20).astype(float), index=tm.makeIntIndex(20))
+ series = Series(
+ np.arange(20, dtype=np.float64), index=np.arange(20, dtype=np.int64)
+ )
series[::2] = 0
assert (series[::2] == 0).all()
diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py
index 1cbb7c7982802..e1ec8afda33a9 100644
--- a/pandas/tests/series/methods/test_combine_first.py
+++ b/pandas/tests/series/methods/test_combine_first.py
@@ -32,8 +32,8 @@ def test_combine_first_name(self, datetime_series):
assert result.name == datetime_series.name
def test_combine_first(self):
- values = tm.makeIntIndex(20).values.astype(float)
- series = Series(values, index=tm.makeIntIndex(20))
+ values = np.arange(20, dtype=np.float64)
+ series = Series(values, index=np.arange(20, dtype=np.int64))
series_copy = series * 2
series_copy[::2] = np.nan
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index e23302b58b197..29d6e2036476e 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -10,6 +10,7 @@
Index,
Series,
date_range,
+ period_range,
timedelta_range,
)
import pandas._testing as tm
@@ -72,13 +73,12 @@ def test_tab_completion_with_categorical(self):
Index(list("ab") * 5, dtype="category"),
Index([str(i) for i in range(10)]),
Index(["foo", "bar", "baz"] * 2),
- tm.makeDateIndex(10),
- tm.makePeriodIndex(10),
+ date_range("2020-01-01", periods=10),
+ period_range("2020-01-01", periods=10, freq="D"),
timedelta_range("1 day", periods=10),
- tm.makeIntIndex(10),
Index(np.arange(10), dtype=np.uint64),
- tm.makeIntIndex(10),
- tm.makeFloatIndex(10),
+ Index(np.arange(10), dtype=np.int64),
+ Index(np.arange(10), dtype=np.float64),
Index([True, False]),
Index([f"a{i}" for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 972d403fff997..e08f8d0c15f39 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1337,7 +1337,7 @@ def test_constructor_dict(self):
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
- pidx = tm.makePeriodIndex(100)
+ pidx = period_range("2020-01-01", periods=10, freq="D")
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 0417c7a631da2..4fa256a6b8630 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -148,7 +148,7 @@ def test_multiindex_objects():
),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
- Series(tm.makePeriodIndex()),
+ Series(period_range("2020-01-01", periods=10, freq="D")),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
)
@@ -181,7 +181,7 @@ def test_hash_pandas_object(obj, index):
),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
- Series(tm.makePeriodIndex()),
+ Series(period_range("2020-01-01", periods=10, freq="D")),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
],
)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56229 | 2023-11-28T17:45:42Z | 2023-11-29T17:51:55Z | 2023-11-29T17:51:55Z | 2023-11-29T17:51:59Z |
BUG: DataFrame.update not operating in-place for datetime64[ns, UTC] dtype | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index dce776755ad7e..90566f62bfdaf 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -520,7 +520,7 @@ Indexing
Missing
^^^^^^^
--
+- Bug in :meth:`DataFrame.update` wasn't updating in-place for tz-aware datetime64 dtypes (:issue:`56227`)
-
MultiIndex
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5d05983529fba..3931616d49baf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8838,14 +8838,14 @@ def update(
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
- ... 'B': [400, 500, 600]})
+ ... 'B': [400., 500., 600.]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
- A B
- 0 1 4
- 1 2 500
- 2 3 6
+ A B
+ 0 1 4.0
+ 1 2 500.0
+ 2 3 6.0
"""
if not PYPY and using_copy_on_write():
if sys.getrefcount(self) <= REF_COUNT:
@@ -8862,8 +8862,6 @@ def update(
stacklevel=2,
)
- from pandas.core.computation import expressions
-
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
@@ -8897,7 +8895,7 @@ def update(
if mask.all():
continue
- self.loc[:, col] = expressions.where(mask, this, that)
+ self.loc[:, col] = self[col].where(mask, that)
# ----------------------------------------------------------------------
# Data reshaping
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index 0d32788b04b03..c79a37b5b30f0 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -140,6 +140,22 @@ def test_update_datetime_tz(self):
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
+ def test_update_datetime_tz_in_place(self, using_copy_on_write, warn_copy_on_write):
+ # https://github.com/pandas-dev/pandas/issues/56227
+ result = DataFrame([pd.Timestamp("2019", tz="UTC")])
+ orig = result.copy()
+ view = result[:]
+ with tm.assert_produces_warning(
+ FutureWarning if warn_copy_on_write else None, match="Setting a value"
+ ):
+ result.update(result + pd.Timedelta(days=1))
+ expected = DataFrame([pd.Timestamp("2019-01-02", tz="UTC")])
+ tm.assert_frame_equal(result, expected)
+ if not using_copy_on_write:
+ tm.assert_frame_equal(view, expected)
+ else:
+ tm.assert_frame_equal(view, orig)
+
def test_update_with_different_dtype(self, using_copy_on_write):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
| - [ ] closes #56227 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56228 | 2023-11-28T17:41:32Z | 2023-11-29T18:00:48Z | 2023-11-29T18:00:48Z | 2023-12-01T12:01:27Z |
Backport PR #56222 on branch 2.1.x (Add doc notes for deprecations) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3281d245dca56..564d572254f8d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9456,6 +9456,10 @@ def first(self, offset) -> Self:
"""
Select initial periods of time series data based on a date offset.
+ .. deprecated:: 2.1
+ :meth:`.first` is deprecated and will be removed in a future version.
+ Please create a mask and filter using `.loc` instead.
+
For a DataFrame with a sorted DatetimeIndex, this function can
select the first few rows based on a date offset.
@@ -9535,6 +9539,10 @@ def last(self, offset) -> Self:
"""
Select final periods of time series data based on a date offset.
+ .. deprecated:: 2.1
+ :meth:`.last` is deprecated and will be removed in a future version.
+ Please create a mask and filter using `.loc` instead.
+
For a DataFrame with a sorted DatetimeIndex, this function
selects the last few rows based on a date offset.
| Backport PR #56222: Add doc notes for deprecations | https://api.github.com/repos/pandas-dev/pandas/pulls/56225 | 2023-11-28T15:48:27Z | 2023-11-28T17:07:52Z | 2023-11-28T17:07:52Z | 2023-11-28T17:07:53Z |
Add doc notes for deprecations | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 56001fabfdc9d..579d66be994e6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9661,6 +9661,10 @@ def first(self, offset) -> Self:
"""
Select initial periods of time series data based on a date offset.
+ .. deprecated:: 2.1
+ :meth:`.first` is deprecated and will be removed in a future version.
+ Please create a mask and filter using `.loc` instead.
+
For a DataFrame with a sorted DatetimeIndex, this function can
select the first few rows based on a date offset.
@@ -9740,6 +9744,10 @@ def last(self, offset) -> Self:
"""
Select final periods of time series data based on a date offset.
+ .. deprecated:: 2.1
+ :meth:`.last` is deprecated and will be removed in a future version.
+ Please create a mask and filter using `.loc` instead.
+
For a DataFrame with a sorted DatetimeIndex, this function
selects the last few rows based on a date offset.
| We should probably back port | https://api.github.com/repos/pandas-dev/pandas/pulls/56222 | 2023-11-28T14:05:17Z | 2023-11-28T15:47:18Z | 2023-11-28T15:47:18Z | 2023-11-28T15:53:33Z |
Switch arrow type for string array to large string | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 70039cc697b8a..e0aa4d1306c95 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -236,6 +236,8 @@ Other enhancements
- Implemented :meth:`Series.str.extract` for :class:`ArrowDtype` (:issue:`56268`)
- Improved error message that appears in :meth:`DatetimeIndex.to_period` with frequencies which are not supported as period frequencies, such as "BMS" (:issue:`56243`)
- Improved error message when constructing :class:`Period` with invalid offsets such as "QS" (:issue:`55785`)
+- The dtypes ``string[pyarrow]`` and ``string[pyarrow_numpy]`` now both utilize the ``large_string`` type from PyArrow to avoid overflow for long columns (:issue:`56259`)
+
.. ---------------------------------------------------------------------------
.. _whatsnew_220.notable_bug_fixes:
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 633efe43fce1a..d7bec102c43ca 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -291,6 +291,7 @@ def _from_sequence_of_strings(
pa_type is None
or pa.types.is_binary(pa_type)
or pa.types.is_string(pa_type)
+ or pa.types.is_large_string(pa_type)
):
# pa_type is None: Let pa.array infer
# pa_type is string/binary: scalars already correct type
@@ -632,7 +633,9 @@ def __invert__(self) -> Self:
# This is a bit wise op for integer types
if pa.types.is_integer(self._pa_array.type):
return type(self)(pc.bit_wise_not(self._pa_array))
- elif pa.types.is_string(self._pa_array.type):
+ elif pa.types.is_string(self._pa_array.type) or pa.types.is_large_string(
+ self._pa_array.type
+ ):
# Raise TypeError instead of pa.ArrowNotImplementedError
raise TypeError("__invert__ is not supported for string dtypes")
else:
@@ -692,7 +695,11 @@ def _evaluate_op_method(self, other, op, arrow_funcs):
pa_type = self._pa_array.type
other = self._box_pa(other)
- if pa.types.is_string(pa_type) or pa.types.is_binary(pa_type):
+ if (
+ pa.types.is_string(pa_type)
+ or pa.types.is_large_string(pa_type)
+ or pa.types.is_binary(pa_type)
+ ):
if op in [operator.add, roperator.radd]:
sep = pa.scalar("", type=pa_type)
if op is operator.add:
@@ -709,7 +716,9 @@ def _evaluate_op_method(self, other, op, arrow_funcs):
result = pc.binary_repeat(binary, pa_integral)
return type(self)(result)
elif (
- pa.types.is_string(other.type) or pa.types.is_binary(other.type)
+ pa.types.is_string(other.type)
+ or pa.types.is_binary(other.type)
+ or pa.types.is_large_string(other.type)
) and op in [operator.mul, roperator.rmul]:
binary = other
integral = self._pa_array
@@ -1471,7 +1480,7 @@ def _concat_same_type(cls, to_concat) -> Self:
chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()]
if to_concat[0].dtype == "string":
# StringDtype has no attribute pyarrow_dtype
- pa_dtype = pa.string()
+ pa_dtype = pa.large_string()
else:
pa_dtype = to_concat[0].dtype.pyarrow_dtype
arr = pa.chunked_array(chunks, type=pa_dtype)
@@ -2253,7 +2262,9 @@ def _str_find(self, sub: str, start: int = 0, end: int | None = None):
return type(self)(result)
def _str_join(self, sep: str):
- if pa.types.is_string(self._pa_array.type):
+ if pa.types.is_string(self._pa_array.type) or pa.types.is_large_string(
+ self._pa_array.type
+ ):
result = self._apply_elementwise(list)
result = pa.chunked_array(result, type=pa.list_(pa.string()))
else:
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 32ab3054c0f51..56732619a2d29 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -126,17 +126,40 @@ class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringAr
_storage = "pyarrow"
def __init__(self, values) -> None:
+ _chk_pyarrow_available()
+ if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_string(
+ values.type
+ ):
+ values = pc.cast(values, pa.large_string())
+
super().__init__(values)
self._dtype = StringDtype(storage=self._storage)
- if not pa.types.is_string(self._pa_array.type) and not (
+ if not pa.types.is_large_string(self._pa_array.type) and not (
pa.types.is_dictionary(self._pa_array.type)
- and pa.types.is_string(self._pa_array.type.value_type)
+ and pa.types.is_large_string(self._pa_array.type.value_type)
):
raise ValueError(
- "ArrowStringArray requires a PyArrow (chunked) array of string type"
+ "ArrowStringArray requires a PyArrow (chunked) array of "
+ "large_string type"
)
+ @classmethod
+ def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
+ pa_scalar = super()._box_pa_scalar(value, pa_type)
+ if pa.types.is_string(pa_scalar.type) and pa_type is None:
+ pa_scalar = pc.cast(pa_scalar, pa.large_string())
+ return pa_scalar
+
+ @classmethod
+ def _box_pa_array(
+ cls, value, pa_type: pa.DataType | None = None, copy: bool = False
+ ) -> pa.Array | pa.ChunkedArray:
+ pa_array = super()._box_pa_array(value, pa_type)
+ if pa.types.is_string(pa_array.type) and pa_type is None:
+ pa_array = pc.cast(pa_array, pa.large_string())
+ return pa_array
+
def __len__(self) -> int:
"""
Length of this array.
@@ -574,15 +597,6 @@ def _rank(
class ArrowStringArrayNumpySemantics(ArrowStringArray):
_storage = "pyarrow_numpy"
- def __init__(self, values) -> None:
- _chk_pyarrow_available()
-
- if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_large_string(
- values.type
- ):
- values = pc.cast(values, pa.string())
- super().__init__(values)
-
@classmethod
def _result_converter(cls, values, na=None):
if not isna(na):
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 12118d1488932..b0fa6bc6e90c4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -172,9 +172,17 @@ def _convert_arrays_to_dataframe(
)
if dtype_backend == "pyarrow":
pa = import_optional_dependency("pyarrow")
- arrays = [
- ArrowExtensionArray(pa.array(arr, from_pandas=True)) for arr in arrays
- ]
+
+ result_arrays = []
+ for arr in arrays:
+ pa_array = pa.array(arr, from_pandas=True)
+ if arr.dtype == "string":
+ # TODO: Arrow still infers strings arrays as regular strings instead
+ # of large_string, which is what we preserver everywhere else for
+ # dtype_backend="pyarrow". We may want to reconsider this
+ pa_array = pa_array.cast(pa.string())
+ result_arrays.append(ArrowExtensionArray(pa_array))
+ arrays = result_arrays # type: ignore[assignment]
if arrays:
df = DataFrame(dict(zip(list(range(len(columns))), arrays)))
df.columns = columns
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 41255b2516e7e..320bdca60a932 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -487,13 +487,15 @@ def test_fillna_args(dtype, arrow_string_storage):
def test_arrow_array(dtype):
# protocol added in 0.15.0
pa = pytest.importorskip("pyarrow")
+ import pyarrow.compute as pc
data = pd.array(["a", "b", "c"], dtype=dtype)
arr = pa.array(data)
- expected = pa.array(list(data), type=pa.string(), from_pandas=True)
+ expected = pa.array(list(data), type=pa.large_string(), from_pandas=True)
if dtype.storage in ("pyarrow", "pyarrow_numpy") and pa_version_under12p0:
expected = pa.chunked_array(expected)
-
+ if dtype.storage == "python":
+ expected = pc.cast(expected, pa.string())
assert arr.equals(expected)
@@ -512,7 +514,10 @@ def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string):
data = pd.array(["a", "b", None], dtype=dtype)
df = pd.DataFrame({"a": data})
table = pa.table(df)
- assert table.field("a").type == "string"
+ if dtype.storage == "python":
+ assert table.field("a").type == "string"
+ else:
+ assert table.field("a").type == "large_string"
with pd.option_context("string_storage", string_storage2):
result = table.to_pandas()
assert isinstance(result["a"].dtype, pd.StringDtype)
@@ -539,7 +544,10 @@ def test_arrow_load_from_zero_chunks(
data = pd.array([], dtype=dtype)
df = pd.DataFrame({"a": data})
table = pa.table(df)
- assert table.field("a").type == "string"
+ if dtype.storage == "python":
+ assert table.field("a").type == "string"
+ else:
+ assert table.field("a").type == "large_string"
# Instantiate the same table with no chunks at all
table = pa.table([pa.chunked_array([], type=pa.string())], schema=table.schema)
with pd.option_context("string_storage", string_storage2):
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index 222b77cb4e94f..d7811b6fed883 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -61,7 +61,7 @@ def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage
msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray"
else:
msg = re.escape(
- "ArrowStringArray requires a PyArrow (chunked) array of string type"
+ "ArrowStringArray requires a PyArrow (chunked) array of large_string type"
)
with pytest.raises(ValueError, match=msg):
ArrowStringArray(arr)
@@ -76,17 +76,20 @@ def test_constructor_not_string_type_value_dictionary_raises(chunked):
arr = pa.chunked_array(arr)
msg = re.escape(
- "ArrowStringArray requires a PyArrow (chunked) array of string type"
+ "ArrowStringArray requires a PyArrow (chunked) array of large_string type"
)
with pytest.raises(ValueError, match=msg):
ArrowStringArray(arr)
+@pytest.mark.xfail(
+ reason="dict conversion does not seem to be implemented for large string in arrow"
+)
@pytest.mark.parametrize("chunked", [True, False])
def test_constructor_valid_string_type_value_dictionary(chunked):
pa = pytest.importorskip("pyarrow")
- arr = pa.array(["1", "2", "3"], pa.dictionary(pa.int32(), pa.utf8()))
+ arr = pa.array(["1", "2", "3"], pa.large_string()).dictionary_encode()
if chunked:
arr = pa.chunked_array(arr)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 58a1e51d31b74..0eefb0b52c483 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -2054,6 +2054,13 @@ def test_read_json_dtype_backend(
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+
else:
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 8566b87ef4292..bed2b5e10a6f7 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -971,6 +971,12 @@ def test_dtype_backend(string_storage, dtype_backend):
if string_storage == "python":
arr = StringArray(np.array(["a", "b"], dtype=np.object_))
arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ arr = ArrowExtensionArray(pa.array(["a", "b"]))
+ arr_na = ArrowExtensionArray(pa.array([None, "a"]))
else:
pa = pytest.importorskip("pyarrow")
arr = ArrowStringArray(pa.array(["a", "b"]))
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index 8564f09ef7ae9..3c0208fcc74ec 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -359,6 +359,13 @@ def test_read_clipboard_dtype_backend(
string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow" and engine != "c":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+ string_array_na = ArrowExtensionArray(pa.array(["x", None]))
+
else:
string_array = ArrowStringArray(pa.array(["x", "y"]))
string_array_na = ArrowStringArray(pa.array(["x", None]))
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 15c5953e79bda..22a7d3b83a459 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -186,6 +186,12 @@ def test_read_feather_dtype_backend(self, string_storage, dtype_backend):
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+
else:
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index f0256316e1689..607357e709b6e 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -183,7 +183,12 @@ def test_dtype_backend(self, string_storage, dtype_backend, flavor_read_html):
if string_storage == "python":
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
else:
pa = pytest.importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index e3272e5f5902d..6645aefd4f0a7 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -3647,6 +3647,13 @@ def func(storage, dtype_backend, conn_name) -> DataFrame:
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) # type: ignore[assignment]
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) # type: ignore[assignment]
+
else:
pa = pytest.importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index e4456b0a78e06..6f429c1ecbf8a 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -2044,6 +2044,13 @@ def test_read_xml_nullable_dtypes(
string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+ string_array_na = ArrowExtensionArray(pa.array(["x", None]))
+
else:
pa = pytest.importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["x", "y"]))
| - [ ] closes #56259
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
large string is a more sensible default (take concatenates the chunks in pyarrow which can cause overflows pretty quickly), large string should avoid this
one todo for a follow up:
- ensure interoperability with ``"string[pyarrow]"``
Let's see if CI likes this | https://api.github.com/repos/pandas-dev/pandas/pulls/56220 | 2023-11-28T11:48:41Z | 2023-12-21T21:05:39Z | 2023-12-21T21:05:39Z | 2023-12-21T21:05:42Z |
DOC: DataFrame.update doctests | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6110f694a6700..b6493614e05a0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8826,26 +8826,27 @@ def update(
1 b e
2 c f
- For Series, its name attribute must be set.
-
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
- >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
- >>> df.update(new_column)
+ >>> new_df = pd.DataFrame({'B': ['d', 'f']}, index=[0, 2])
+ >>> df.update(new_df)
>>> df
A B
0 a d
1 b y
- 2 c e
+ 2 c f
+
+ For Series, its name attribute must be set.
+
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
- >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
- >>> df.update(new_df)
+ >>> new_column = pd.Series(['d', 'e', 'f'], name='B')
+ >>> df.update(new_column)
>>> df
A B
- 0 a x
- 1 b d
- 2 c e
+ 0 a d
+ 1 b e
+ 2 c f
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
|
I felt this original documentation was doing too much in this code block - the description doesn't match the second code example.
<kbd><img width="762" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/cd897229-5366-40d9-8034-5521b7ecd0e7"></kbd>
So I broke it out into these two separate examples:
<kbd><img width="974" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/36585ddc-d14b-485e-917e-e53f5609c4c3"></kbd>
---
I added this new example, which I would have found useful:
<kbd><img width="935" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/b82fffd1-c095-4784-a2ec-b764b9093a29"></kbd>
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56219 | 2023-11-28T06:53:02Z | 2023-11-29T17:48:12Z | 2023-11-29T17:48:12Z | 2023-11-29T17:48:23Z |
TST: specify dt64 units | diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 0d71fb0926df9..5456615f6f028 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -798,7 +798,7 @@ def test_std_timedelta64_skipna_false(self):
"values", [["2022-01-01", "2022-01-02", pd.NaT, "2022-01-03"], 4 * [pd.NaT]]
)
def test_std_datetime64_with_nat(
- self, values, skipna, using_array_manager, request
+ self, values, skipna, using_array_manager, request, unit
):
# GH#51335
if using_array_manager and (
@@ -808,13 +808,14 @@ def test_std_datetime64_with_nat(
reason="GH#51446: Incorrect type inference on NaT in reduction result"
)
request.applymarker(mark)
- df = DataFrame({"a": to_datetime(values)})
+ dti = to_datetime(values).as_unit(unit)
+ df = DataFrame({"a": dti})
result = df.std(skipna=skipna)
if not skipna or all(value is pd.NaT for value in values):
- expected = Series({"a": pd.NaT}, dtype="timedelta64[ns]")
+ expected = Series({"a": pd.NaT}, dtype=f"timedelta64[{unit}]")
else:
# 86400000000000ns == 1 day
- expected = Series({"a": 86400000000000}, dtype="timedelta64[ns]")
+ expected = Series({"a": 86400000000000}, dtype=f"timedelta64[{unit}]")
tm.assert_series_equal(result, expected)
def test_sum_corner(self):
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index b8b34d365d051..a5ac5b09bfd34 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -537,7 +537,9 @@ def test_groupby_groups_datetimeindex2(self):
for date in dates:
result = grouped.get_group(date)
data = [[df.loc[date, "A"], df.loc[date, "B"]]]
- expected_index = DatetimeIndex([date], name="date", freq="D")
+ expected_index = DatetimeIndex(
+ [date], name="date", freq="D", dtype=index.dtype
+ )
expected = DataFrame(data, columns=list("AB"), index=expected_index)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/methods/test_round.py b/pandas/tests/indexes/datetimes/methods/test_round.py
index ad5dd37eacd7c..cde4a3a65804d 100644
--- a/pandas/tests/indexes/datetimes/methods/test_round.py
+++ b/pandas/tests/indexes/datetimes/methods/test_round.py
@@ -40,9 +40,9 @@ def test_round_invalid(self, freq, error_msg):
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
- def test_round(self, tz_naive_fixture):
+ def test_round(self, tz_naive_fixture, unit):
tz = tz_naive_fixture
- rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
+ rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz, unit=unit)
elt = rng[1]
expected_rng = DatetimeIndex(
@@ -53,10 +53,11 @@ def test_round(self, tz_naive_fixture):
Timestamp("2016-01-01 02:00:00", tz=tz),
Timestamp("2016-01-01 02:00:00", tz=tz),
]
- )
+ ).as_unit(unit)
expected_elt = expected_rng[1]
- tm.assert_index_equal(rng.round(freq="h"), expected_rng)
+ result = rng.round(freq="h")
+ tm.assert_index_equal(result, expected_rng)
assert elt.round(freq="h") == expected_elt
msg = INVALID_FREQ_ERR_MSG
@@ -74,9 +75,9 @@ def test_round(self, tz_naive_fixture):
def test_round2(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#14440 & GH#15578
- index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
+ index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz).as_unit("ns")
result = index.round("ms")
- expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
+ expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz).as_unit("ns")
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
@@ -84,20 +85,21 @@ def test_round2(self, tz_naive_fixture):
def test_round3(self, tz_naive_fixture):
tz = tz_naive_fixture
- index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
+ index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz).as_unit("ns")
result = index.round("ms")
- expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
+ expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz).as_unit("ns")
tm.assert_index_equal(result, expected)
def test_round4(self, tz_naive_fixture):
- index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
+ index = DatetimeIndex(["2016-10-17 12:00:00.001501031"], dtype="M8[ns]")
result = index.round("10ns")
- expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
+ expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"], dtype="M8[ns]")
tm.assert_index_equal(result, expected)
+ ts = "2016-10-17 12:00:00.001501031"
+ dti = DatetimeIndex([ts], dtype="M8[ns]")
with tm.assert_produces_warning(False):
- ts = "2016-10-17 12:00:00.001501031"
- DatetimeIndex([ts]).round("1010ns")
+ dti.round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
@@ -112,9 +114,10 @@ def test_no_rounding_occurs(self, tz_naive_fixture):
Timestamp("2016-01-01 00:06:00", tz=tz),
Timestamp("2016-01-01 00:08:00", tz=tz),
]
- )
+ ).as_unit("ns")
- tm.assert_index_equal(rng.round(freq="2min"), expected_rng)
+ result = rng.round(freq="2min")
+ tm.assert_index_equal(result, expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 35c3604de3d63..4a0b192244dc8 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -707,10 +707,14 @@ def test_constructor_dtype(self):
idx = DatetimeIndex(
["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"
)
- expected = DatetimeIndex(["2013-01-01", "2013-01-02"]).tz_localize("US/Eastern")
+ expected = (
+ DatetimeIndex(["2013-01-01", "2013-01-02"])
+ .as_unit("ns")
+ .tz_localize("US/Eastern")
+ )
tm.assert_index_equal(idx, expected)
- idx = DatetimeIndex(["2013-01-01", "2013-01-02"], tz="US/Eastern")
+ idx = DatetimeIndex(["2013-01-01", "2013-01-02"], tz="US/Eastern").as_unit("ns")
tm.assert_index_equal(idx, expected)
def test_constructor_dtype_tz_mismatch_raises(self):
@@ -774,7 +778,7 @@ def test_constructor_start_end_with_tz(self, tz):
result = date_range(freq="D", start=start, end=end, tz=tz)
expected = DatetimeIndex(
["2013-01-01 06:00:00", "2013-01-02 06:00:00"],
- tz="America/Los_Angeles",
+ dtype="M8[ns, America/Los_Angeles]",
freq="D",
)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index b7932715c3ac7..bfbcdcff51ee6 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -226,58 +226,54 @@ def test_take_nan_first_datetime(self):
expected = DatetimeIndex([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
- def test_take(self):
+ @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
+ def test_take(self, tz):
# GH#10295
- idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
- idx2 = date_range(
- "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
+ idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx", tz=tz)
+
+ result = idx.take([0])
+ assert result == Timestamp("2011-01-01", tz=idx.tz)
+
+ result = idx.take([0, 1, 2])
+ expected = date_range(
+ "2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
)
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- for idx in [idx1, idx2]:
- result = idx.take([0])
- assert result == Timestamp("2011-01-01", tz=idx.tz)
+ result = idx.take([0, 2, 4])
+ expected = date_range(
+ "2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- result = idx.take([0, 1, 2])
- expected = date_range(
- "2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx.take([7, 4, 1])
+ expected = date_range(
+ "2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- result = idx.take([0, 2, 4])
- expected = date_range(
- "2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx.take([3, 2, 5])
+ expected = DatetimeIndex(
+ ["2011-01-04", "2011-01-03", "2011-01-06"],
+ dtype=idx.dtype,
+ freq=None,
+ name="idx",
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq is None
- result = idx.take([7, 4, 1])
- expected = date_range(
- "2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
-
- result = idx.take([3, 2, 5])
- expected = DatetimeIndex(
- ["2011-01-04", "2011-01-03", "2011-01-06"],
- dtype=idx.dtype,
- freq=None,
- name="idx",
- )
- tm.assert_index_equal(result, expected)
- assert result.freq is None
-
- result = idx.take([-3, 2, 5])
- expected = DatetimeIndex(
- ["2011-01-29", "2011-01-03", "2011-01-06"],
- dtype=idx.dtype,
- freq=None,
- tz=idx.tz,
- name="idx",
- )
- tm.assert_index_equal(result, expected)
- assert result.freq is None
+ result = idx.take([-3, 2, 5])
+ expected = DatetimeIndex(
+ ["2011-01-29", "2011-01-03", "2011-01-06"],
+ dtype=idx.dtype,
+ freq=None,
+ name="idx",
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq is None
def test_take_invalid_kwargs(self):
idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 03691ca318037..45a9c207f0acc 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -256,13 +256,13 @@ def test_insert_float_index(
def test_insert_index_datetimes(self, fill_val, exp_dtype, insert_value):
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
- )
+ ).as_unit("ns")
assert obj.dtype == exp_dtype
exp = pd.DatetimeIndex(
["2011-01-01", fill_val.date(), "2011-01-02", "2011-01-03", "2011-01-04"],
tz=fill_val.tz,
- )
+ ).as_unit("ns")
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
if fill_val.tz:
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 6510612ba6f87..af7533399ea74 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -57,7 +57,10 @@ def test_indexing_fast_xs(self):
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
- [Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
+ [Timestamp("2014-01-06 00:00:00+0000", tz="UTC")],
+ index=["a"],
+ name=5,
+ dtype="M8[ns, UTC]",
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py
index c061a393bb1a6..c00a2dc92a52b 100644
--- a/pandas/tests/reshape/concat/test_datetimes.py
+++ b/pandas/tests/reshape/concat/test_datetimes.py
@@ -52,18 +52,14 @@ def test_concat_datetime_timezone(self):
df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
result = concat([df1, df2], axis=1)
- exp_idx = (
- DatetimeIndex(
- [
- "2011-01-01 00:00:00+01:00",
- "2011-01-01 01:00:00+01:00",
- "2011-01-01 02:00:00+01:00",
- ],
- freq="h",
- )
- .tz_convert("UTC")
- .tz_convert("Europe/Paris")
- .as_unit("ns")
+ exp_idx = DatetimeIndex(
+ [
+ "2011-01-01 00:00:00+01:00",
+ "2011-01-01 01:00:00+01:00",
+ "2011-01-01 02:00:00+01:00",
+ ],
+ dtype="M8[ns, Europe/Paris]",
+ freq="h",
)
expected = DataFrame(
@@ -431,21 +427,25 @@ def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame(
{
- "dt": [
- datetime(2014, 1, 1),
- datetime(2014, 1, 2),
- datetime(2014, 1, 3),
- ],
+ "dt": DatetimeIndex(
+ [
+ datetime(2014, 1, 1),
+ datetime(2014, 1, 2),
+ datetime(2014, 1, 3),
+ ],
+ dtype="M8[ns, US/Pacific]",
+ ),
"b": ["A", "B", "C"],
"c": [1, 2, 3],
"d": [4, 5, 6],
}
)
- df["dt"] = df["dt"].apply(lambda d: Timestamp(d, tz="US/Pacific"))
df = df.set_index(["dt", "b"])
exp_idx1 = DatetimeIndex(
- ["2014-01-01", "2014-01-02", "2014-01-03"] * 2, tz="US/Pacific", name="dt"
+ ["2014-01-01", "2014-01-02", "2014-01-03"] * 2,
+ dtype="M8[ns, US/Pacific]",
+ name="dt",
)
exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 168be838ec768..0f3577a214186 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -126,7 +126,8 @@ def test_setitem_with_tz_dst(self, indexer_sli):
Timestamp("2016-11-06 00:00-04:00", tz=tz),
Timestamp("2011-01-01 00:00-05:00", tz=tz),
Timestamp("2016-11-06 01:00-05:00", tz=tz),
- ]
+ ],
+ dtype=orig.dtype,
)
# scalar
@@ -138,6 +139,7 @@ def test_setitem_with_tz_dst(self, indexer_sli):
vals = Series(
[Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
+ dtype=orig.dtype,
)
assert vals.dtype == f"datetime64[ns, {tz}]"
@@ -146,7 +148,8 @@ def test_setitem_with_tz_dst(self, indexer_sli):
Timestamp("2016-11-06 00:00", tz=tz),
Timestamp("2011-01-01 00:00", tz=tz),
Timestamp("2012-01-01 00:00", tz=tz),
- ]
+ ],
+ dtype=orig.dtype,
)
ser = orig.copy()
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index f77bff049124b..972d403fff997 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1154,24 +1154,24 @@ def test_constructor_with_datetime_tz5(self):
def test_constructor_with_datetime_tz4(self):
# inference
- s = Series(
+ ser = Series(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
- assert s.dtype == "datetime64[ns, US/Pacific]"
- assert lib.infer_dtype(s, skipna=True) == "datetime64"
+ assert ser.dtype == "datetime64[ns, US/Pacific]"
+ assert lib.infer_dtype(ser, skipna=True) == "datetime64"
def test_constructor_with_datetime_tz3(self):
- s = Series(
+ ser = Series(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
- assert s.dtype == "object"
- assert lib.infer_dtype(s, skipna=True) == "datetime"
+ assert ser.dtype == "object"
+ assert lib.infer_dtype(ser, skipna=True) == "datetime"
def test_constructor_with_datetime_tz2(self):
# with all NaT
@@ -1587,7 +1587,7 @@ def test_NaT_scalar(self):
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
- expected = Series([NaT])
+ expected = Series([NaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 508e03f617376..8139fe52c7037 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -625,7 +625,7 @@ def test_to_datetime_mixed_date_and_string(self, format):
# https://github.com/pandas-dev/pandas/issues/50108
d1 = date(2020, 1, 2)
res = to_datetime(["2020-01-01", d1], format=format)
- expected = DatetimeIndex(["2020-01-01", "2020-01-02"])
+ expected = DatetimeIndex(["2020-01-01", "2020-01-02"], dtype="M8[ns]")
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize(
@@ -1348,7 +1348,9 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
- expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")])
+ expected = Series(
+ [Timestamp("2013-01-01 01:00:00", tz="UTC")], dtype="M8[ns, UTC]"
+ )
result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@@ -1853,7 +1855,7 @@ class TestToDatetimeUnit:
def test_to_datetime_month_or_year_unit_int(self, cache, unit, item, request):
# GH#50870 Note we have separate tests that pd.Timestamp gets these right
ts = Timestamp(item, unit=unit)
- expected = DatetimeIndex([ts])
+ expected = DatetimeIndex([ts], dtype="M8[ns]")
result = to_datetime([item], unit=unit, cache=cache)
tm.assert_index_equal(result, expected)
@@ -1929,7 +1931,8 @@ def test_unit_array_mixed_nans(self, cache):
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
- ["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
+ ["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"],
+ dtype="M8[ns]",
)
tm.assert_index_equal(result, expected)
@@ -1972,7 +1975,9 @@ def test_unit_consistency(self, cache, error):
def test_unit_with_numeric(self, cache, errors, dtype):
# GH 13180
# coercions from floats/ints are ok
- expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
+ expected = DatetimeIndex(
+ ["2015-06-19 05:33:20", "2015-05-27 22:33:20"], dtype="M8[ns]"
+ )
arr = np.array([1.434692e18, 1.432766e18]).astype(dtype)
result = to_datetime(arr, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
@@ -1995,7 +2000,7 @@ def test_unit_with_numeric(self, cache, errors, dtype):
def test_unit_with_numeric_coerce(self, cache, exp, arr, warning):
# but we want to make sure that we are coercing
# if we have ints/strings
- expected = DatetimeIndex(exp)
+ expected = DatetimeIndex(exp, dtype="M8[ns]")
with tm.assert_produces_warning(warning, match="Could not infer format"):
result = to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@@ -2044,7 +2049,7 @@ def test_unit_ignore_keeps_name(self, cache):
def test_to_datetime_errors_ignore_utc_true(self):
# GH#23758
result = to_datetime([1], unit="s", utc=True, errors="ignore")
- expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
+ expected = DatetimeIndex(["1970-01-01 00:00:01"], dtype="M8[ns, UTC]")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", [int, float])
@@ -2064,7 +2069,8 @@ def test_to_datetime_unit_with_nulls(self, null):
result = to_datetime(ser, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
- + [NaT]
+ + [NaT],
+ dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
@@ -2078,7 +2084,8 @@ def test_to_datetime_unit_fractional_seconds(self):
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
- + [NaT]
+ + [NaT],
+ dtype="M8[ns]",
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
@@ -2087,7 +2094,8 @@ def test_to_datetime_unit_fractional_seconds(self):
def test_to_datetime_unit_na_values(self):
result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D")
expected = DatetimeIndex(
- [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
+ [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3,
+ dtype="M8[ns]",
)
tm.assert_index_equal(result, expected)
@@ -2101,7 +2109,8 @@ def test_to_datetime_unit_invalid(self, bad_val):
def test_to_timestamp_unit_coerce(self, bad_val):
# coerce we can process
expected = DatetimeIndex(
- [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
+ [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1,
+ dtype="M8[ns]",
)
result = to_datetime([1, 2, bad_val], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 97566750a5225..ddf56e68b1611 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -504,7 +504,7 @@ def test_add_empty_datetimeindex(self, offset_types, tz_naive_fixture):
# GH#12724, GH#30336
offset_s = _create_offset(offset_types)
- dti = DatetimeIndex([], tz=tz_naive_fixture)
+ dti = DatetimeIndex([], tz=tz_naive_fixture).as_unit("ns")
warn = None
if isinstance(
| Aimed at trimming the diff in #55901 | https://api.github.com/repos/pandas-dev/pandas/pulls/56217 | 2023-11-28T00:28:35Z | 2023-11-28T15:43:47Z | 2023-11-28T15:43:47Z | 2023-11-28T16:04:49Z |
impl for DP | diff --git a/pandas/_typing.py b/pandas/_typing.py
index c2d51f63eb2ab..77984a7088e7c 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -28,7 +28,11 @@
overload,
)
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
# To prevent import cycles place any internal imports in the branch below
# and use a string literal forward reference to it in subsequent types
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 3d97711d5f8c3..44c693cd7ca38 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -20,7 +20,11 @@
)
import warnings
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
from pandas._libs import (
algos as libalgos,
diff --git a/pandas/core/base.py b/pandas/core/base.py
index d4421560bcea7..25023052f1ffc 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -16,7 +16,11 @@
)
import warnings
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
from pandas._config import using_copy_on_write
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f37be37f37693..bb24b3846669c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -36,7 +36,11 @@
)
import warnings
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
from numpy import ma
from pandas._config import (
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 45139d84614b3..b3eca0f0a2574 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -13367,6 +13367,16 @@ def make_doc(name: str, ndim: int) -> str:
see_also = _stat_func_see_also
examples = _sum_examples
kwargs = {"min_count": _min_count_stub}
+
+ elif name == "laplace_sum":
+ base_doc = _sum_prod_doc
+ desc = (
+ "Return the laplace sum of the values over the requested axis.\n\n"
+ "This is equivalent to the method ``dp_numpy.laplace_sum``."
+ )
+ see_also = _stat_func_see_also
+ examples = _sum_examples
+ kwargs = {"min_count": _min_count_stub}
elif name == "prod":
base_doc = _sum_prod_doc
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 330effe0f0a9f..9bf5923619602 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -13,7 +13,11 @@
import warnings
import weakref
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
from pandas._config import (
get_option,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index b109ce25a3e73..04751ccba9de5 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -14,7 +14,13 @@
import warnings
import weakref
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
+
+import numpy
from pandas._config import (
using_copy_on_write,
@@ -677,7 +683,7 @@ def reindex_indexer(
return result
# Should be intp, but in some cases we get int64 on 32bit builds
- assert isinstance(indexer, np.ndarray)
+ assert (isinstance(indexer, np.ndarray) or isinstance(indexer, numpy.ndarray))
# some axes don't allow reindexing with dups
if not allow_dups:
@@ -2274,7 +2280,7 @@ def _merge_blocks(
new_values: ArrayLike
- if isinstance(blocks[0].dtype, np.dtype):
+ if isinstance(blocks[0].dtype, np.dtype) or isinstance(blocks[0].dtype, numpy.dtype):
# error: List comprehension has incompatible type List[Union[ndarray,
# ExtensionArray]]; expected List[Union[complex, generic,
# Sequence[Union[int, float, complex, str, bytes, generic]],
@@ -2316,7 +2322,8 @@ def _preprocess_slice_or_indexer(
)
else:
if (
- not isinstance(slice_or_indexer, np.ndarray)
+ (not isinstance(slice_or_indexer, np.ndarray)
+ and not isinstance(slice_or_indexer, numpy.ndarray))
or slice_or_indexer.dtype.kind != "i"
):
dtype = getattr(slice_or_indexer, "dtype", None)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c5f622a113258..4ec26c955b385 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -24,7 +24,13 @@
import warnings
import weakref
-import numpy as np
+import os
+if os.environ.get("DP_NUMPY", "1") == "0":
+ import numpy as np
+else:
+ import dp_numpy as np
+
+import numpy
from pandas._config import using_copy_on_write
from pandas._config.config import _get_option
@@ -6318,6 +6324,23 @@ def sum(
**kwargs,
):
return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs)
+
+ @doc(make_doc("laplace_sum", ndim=1))
+ def laplace_sum(
+ self,
+ axis: Axis | None = None,
+ skipna: bool = True,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ **kwargs,
+ ):
+ assert type(self.values) == np.ndarray
+ eps = kwargs.get('eps')
+ kwargs.pop('eps')
+ self.values.unset_sensitive()
+ raw_val = NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs)
+ noised_val = numpy.random.laplace(loc=raw_val, scale=eps)
+ return noised_val
@doc(make_doc("prod", ndim=1))
def prod(
| できるだけ最小限の書き換えでpandasにdp_numpyを組み込む。
このPull Requestは、mainとの差分の可視化用。 | https://api.github.com/repos/pandas-dev/pandas/pulls/56216 | 2023-11-28T00:18:18Z | 2023-11-28T00:19:22Z | null | 2023-11-28T00:29:34Z |
CoW: Avoid warning for ArrowDtypes when setting inplace | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 843441e4865c7..8fce2be8e4e44 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -54,7 +54,11 @@
)
import pandas.core.algorithms as algos
-from pandas.core.arrays import DatetimeArray
+from pandas.core.arrays import (
+ ArrowExtensionArray,
+ ArrowStringArray,
+ DatetimeArray,
+)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
@@ -1343,11 +1347,15 @@ def column_setitem(
intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
"""
if warn_copy_on_write() and not self._has_no_reference(loc):
- warnings.warn(
- COW_WARNING_GENERAL_MSG,
- FutureWarning,
- stacklevel=find_stack_level(),
- )
+ if not isinstance(
+ self.blocks[self.blknos[loc]].values,
+ (ArrowExtensionArray, ArrowStringArray),
+ ):
+ warnings.warn(
+ COW_WARNING_GENERAL_MSG,
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
elif using_copy_on_write() and not self._has_no_reference(loc):
blkno = self.blknos[loc]
# Split blocks to only copy the column we want to modify
diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py
index cdb06de90a568..c2482645b072e 100644
--- a/pandas/tests/copy_view/test_interp_fillna.py
+++ b/pandas/tests/copy_view/test_interp_fillna.py
@@ -344,8 +344,9 @@ def test_fillna_inplace_ea_noop_shares_memory(
assert not df._mgr._has_no_reference(1)
assert not view._mgr._has_no_reference(1)
- # TODO(CoW-warn) should this warn for ArrowDtype?
- with tm.assert_cow_warning(warn_copy_on_write):
+ with tm.assert_cow_warning(
+ warn_copy_on_write and "pyarrow" not in any_numeric_ea_and_arrow_dtype
+ ):
df.iloc[0, 1] = 100
if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write:
tm.assert_frame_equal(df_orig, view)
| xref https://github.com/pandas-dev/pandas/issues/56019
We shouldn't warn if we know that we are not actually inplace | https://api.github.com/repos/pandas-dev/pandas/pulls/56215 | 2023-11-28T00:00:02Z | 2023-12-04T11:17:24Z | 2023-12-04T11:17:24Z | 2023-12-04T11:17:31Z |
CoW: Add warning for slicing a Series with a MultiIndex | diff --git a/pandas/core/series.py b/pandas/core/series.py
index ff03b5071e3b1..b1ac6225cb71e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1175,7 +1175,7 @@ def _get_values_tuple(self, key: tuple):
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
new_ser = self._constructor(self._values[indexer], index=new_index, copy=False)
- if using_copy_on_write() and isinstance(indexer, slice):
+ if isinstance(indexer, slice):
new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type]
return new_ser.__finalize__(self)
@@ -1217,7 +1217,7 @@ def _get_value(self, label, takeable: bool = False):
new_ser = self._constructor(
new_values, index=new_index, name=self.name, copy=False
)
- if using_copy_on_write() and isinstance(loc, slice):
+ if isinstance(loc, slice):
new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type]
return new_ser.__finalize__(self)
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 355eb2db0ef09..205e80088d8f4 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -1141,16 +1141,18 @@ def test_set_value_copy_only_necessary_column(
assert np.shares_memory(get_array(df, "a"), get_array(view, "a"))
-def test_series_midx_slice(using_copy_on_write):
+def test_series_midx_slice(using_copy_on_write, warn_copy_on_write):
ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]))
+ ser_orig = ser.copy()
result = ser[1]
assert np.shares_memory(get_array(ser), get_array(result))
- # TODO(CoW-warn) should warn -> reference is only tracked in CoW mode, so
- # warning is not triggered
- result.iloc[0] = 100
+ with tm.assert_cow_warning(warn_copy_on_write):
+ result.iloc[0] = 100
if using_copy_on_write:
+ tm.assert_series_equal(ser, ser_orig)
+ else:
expected = Series(
- [1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])
+ [100, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])
)
tm.assert_series_equal(ser, expected)
@@ -1181,16 +1183,15 @@ def test_getitem_midx_slice(
assert df.iloc[0, 0] == 100
-def test_series_midx_tuples_slice(using_copy_on_write):
+def test_series_midx_tuples_slice(using_copy_on_write, warn_copy_on_write):
ser = Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),
)
result = ser[(1, 2)]
assert np.shares_memory(get_array(ser), get_array(result))
- # TODO(CoW-warn) should warn -> reference is only tracked in CoW mode, so
- # warning is not triggered
- result.iloc[0] = 100
+ with tm.assert_cow_warning(warn_copy_on_write):
+ result.iloc[0] = 100
if using_copy_on_write:
expected = Series(
[1, 2, 3],
| xref https://github.com/pandas-dev/pandas/issues/56019
Fixing one more todo
| https://api.github.com/repos/pandas-dev/pandas/pulls/56214 | 2023-11-27T23:29:08Z | 2023-12-04T12:26:35Z | 2023-12-04T12:26:35Z | 2023-12-04T12:41:20Z |
CoW: Avoid warning in apply for mixed dtype frame | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index bb3cc3a03760f..169a44accf066 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -20,6 +20,7 @@
from pandas._config import option_context
from pandas._libs import lib
+from pandas._libs.internals import BlockValuesRefs
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
@@ -1254,6 +1255,8 @@ def series_generator(self) -> Generator[Series, None, None]:
ser = self.obj._ixs(0, axis=0)
mgr = ser._mgr
+ is_view = mgr.blocks[0].refs.has_reference() # type: ignore[union-attr]
+
if isinstance(ser.dtype, ExtensionDtype):
# values will be incorrect for this block
# TODO(EA2D): special case would be unnecessary with 2D EAs
@@ -1267,6 +1270,14 @@ def series_generator(self) -> Generator[Series, None, None]:
ser._mgr = mgr
mgr.set_values(arr)
object.__setattr__(ser, "_name", name)
+ if not is_view:
+ # In apply_series_generator we store the a shallow copy of the
+ # result, which potentially increases the ref count of this reused
+ # `ser` object (depending on the result of the applied function)
+ # -> if that happened and `ser` is already a copy, then we reset
+ # the refs here to avoid triggering a unnecessary CoW inside the
+ # applied function (https://github.com/pandas-dev/pandas/pull/56212)
+ mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0]) # type: ignore[union-attr]
yield ser
@staticmethod
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a02f31d4483b2..9d2a1e634eea2 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -2071,11 +2071,11 @@ def set_values(self, values: ArrayLike) -> None:
Set the values of the single block in place.
Use at your own risk! This does not check if the passed values are
- valid for the current Block/SingleBlockManager (length, dtype, etc).
+ valid for the current Block/SingleBlockManager (length, dtype, etc),
+ and this does not properly keep track of references.
"""
- # TODO(CoW) do we need to handle copy on write here? Currently this is
- # only used for FrameColumnApply.series_generator (what if apply is
- # mutating inplace?)
+ # NOTE(CoW) Currently this is only used for FrameColumnApply.series_generator
+ # which handles CoW by setting the refs manually if necessary
self.blocks[0].values = values
self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 48dde6d42f743..b5ad1094f5bf5 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -18,7 +18,6 @@
DataFrame,
Series,
date_range,
- notna,
)
import pandas._testing as tm
@@ -150,9 +149,7 @@ def test_transform_axis_1_raises():
Series([1]).transform("sum", axis=1)
-# TODO(CoW-warn) should not need to warn
-@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
-def test_apply_modify_traceback(warn_copy_on_write):
+def test_apply_modify_traceback():
data = DataFrame(
{
"A": [
@@ -207,15 +204,9 @@ def transform(row):
row["D"] = 7
return row
- def transform2(row):
- if notna(row["C"]) and row["C"].startswith("shin") and row["A"] == "foo":
- row["D"] = 7
- return row
-
msg = "'float' object has no attribute 'startswith'"
with pytest.raises(AttributeError, match=msg):
- with tm.assert_cow_warning(warn_copy_on_write):
- data.apply(transform, axis=1)
+ data.apply(transform, axis=1)
@pytest.mark.parametrize(
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index ba8e4bd684198..84b2c59bda5d9 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -2013,3 +2013,31 @@ def test_eval_inplace(using_copy_on_write, warn_copy_on_write):
df.iloc[0, 0] = 100
if using_copy_on_write:
tm.assert_frame_equal(df_view, df_orig)
+
+
+def test_apply_modify_row(using_copy_on_write, warn_copy_on_write):
+ # Case: applying a function on each row as a Series object, where the
+ # function mutates the row object (which needs to trigger CoW if row is a view)
+ df = DataFrame({"A": [1, 2], "B": [3, 4]})
+ df_orig = df.copy()
+
+ def transform(row):
+ row["B"] = 100
+ return row
+
+ with tm.assert_cow_warning(warn_copy_on_write):
+ df.apply(transform, axis=1)
+
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ assert df.loc[0, "B"] == 100
+
+ # row Series is a copy
+ df = DataFrame({"A": [1, 2], "B": ["b", "c"]})
+ df_orig = df.copy()
+
+ with tm.assert_produces_warning(None):
+ df.apply(transform, axis=1)
+
+ tm.assert_frame_equal(df, df_orig)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Yikes this is ugly
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56212 | 2023-11-27T21:58:44Z | 2023-12-04T19:37:18Z | 2023-12-04T19:37:18Z | 2023-12-05T08:37:12Z |
CoW: Avoid warning if temporary object is a copy | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 847f514451add..c9da273b99ce9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8903,7 +8903,7 @@ def update(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif not PYPY and not using_copy_on_write() and self._is_view_after_cow_rules():
if sys.getrefcount(self) <= REF_COUNT:
warnings.warn(
_chained_assignment_warning_method_msg,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fb9f987a29bbf..44954e3781f5d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -668,6 +668,14 @@ def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
def _info_axis(self) -> Index:
return getattr(self, self._info_axis_name)
+ def _is_view_after_cow_rules(self):
+ # Only to be used in cases of chained assignment checks, this is a
+ # simplified check that assumes that either the whole object is a view
+ # or a copy
+ if len(self._mgr.blocks) == 0: # type: ignore[union-attr]
+ return False
+ return self._mgr.blocks[0].refs.has_reference() # type: ignore[union-attr]
+
@property
def shape(self) -> tuple[int, ...]:
"""
@@ -7268,7 +7276,11 @@ def fillna(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and _check_cacher(self):
@@ -7550,7 +7562,11 @@ def ffill(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and _check_cacher(self):
@@ -7733,7 +7749,11 @@ def bfill(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and _check_cacher(self):
@@ -7899,7 +7919,11 @@ def replace(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and _check_cacher(self):
@@ -8340,7 +8364,11 @@ def interpolate(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and _check_cacher(self):
@@ -8978,7 +9006,11 @@ def clip(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
@@ -10926,7 +10958,11 @@ def where(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
@@ -11005,7 +11041,11 @@ def mask(
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif (
+ not PYPY
+ and not using_copy_on_write()
+ and self._is_view_after_cow_rules()
+ ):
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ff03b5071e3b1..207f30b68fde2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3580,7 +3580,7 @@ def update(self, other: Series | Sequence | Mapping) -> None:
ChainedAssignmentError,
stacklevel=2,
)
- elif not PYPY and not using_copy_on_write():
+ elif not PYPY and not using_copy_on_write() and self._is_view_after_cow_rules():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
if _check_cacher(self):
diff --git a/pandas/tests/copy_view/test_clip.py b/pandas/tests/copy_view/test_clip.py
index 7ed6a1f803ead..7c87646424e2f 100644
--- a/pandas/tests/copy_view/test_clip.py
+++ b/pandas/tests/copy_view/test_clip.py
@@ -92,10 +92,10 @@ def test_clip_chained_inplace(using_copy_on_write):
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
df["a"].clip(1, 2, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[["a"]].clip(1, 2, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[df["a"] > 1].clip(1, 2, inplace=True)
diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py
index cdb06de90a568..5091fd9aed8a4 100644
--- a/pandas/tests/copy_view/test_interp_fillna.py
+++ b/pandas/tests/copy_view/test_interp_fillna.py
@@ -366,11 +366,11 @@ def test_fillna_chained_assignment(using_copy_on_write):
df[["a"]].fillna(100, inplace=True)
tm.assert_frame_equal(df, df_orig)
else:
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[["a"]].fillna(100, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[df.a > 5].fillna(100, inplace=True)
@@ -394,10 +394,10 @@ def test_interpolate_chained_assignment(using_copy_on_write, func):
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
getattr(df["a"], func)(inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
getattr(df[["a"]], func)(inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
getattr(df[df["a"] > 1], func)(inplace=True)
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index ba8e4bd684198..62214293df912 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1562,11 +1562,11 @@ def test_chained_where_mask(using_copy_on_write, func):
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
getattr(df["a"], func)(df["a"] > 2, 5, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
getattr(df[df["a"] > 1], func)(df["a"] > 2, 5, inplace=True)
@@ -1840,11 +1840,11 @@ def test_update_chained_assignment(using_copy_on_write):
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
df["a"].update(ser2)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[["a"]].update(ser2.to_frame())
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[df["a"] > 1].update(ser2.to_frame())
diff --git a/pandas/tests/copy_view/test_replace.py b/pandas/tests/copy_view/test_replace.py
index 3d8559a1905fc..eb3b1a5ef68e8 100644
--- a/pandas/tests/copy_view/test_replace.py
+++ b/pandas/tests/copy_view/test_replace.py
@@ -401,11 +401,11 @@ def test_replace_chained_assignment(using_copy_on_write):
df[["a"]].replace(1, 100, inplace=True)
tm.assert_frame_equal(df, df_orig)
else:
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[["a"]].replace(1, 100, inplace=True)
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
with option_context("mode.chained_assignment", None):
df[df.a > 5].replace(1, 100, inplace=True)
diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
index 01e5db87ce456..0dd1a56890fee 100644
--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
@@ -34,12 +34,12 @@ def test_detect_chained_assignment(using_copy_on_write, warn_copy_on_write):
with tm.raises_chained_assignment_error():
zed["eyes"]["right"].fillna(value=555, inplace=True)
elif warn_copy_on_write:
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
zed["eyes"]["right"].fillna(value=555, inplace=True)
else:
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(SettingWithCopyError, match=msg):
- with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+ with tm.assert_produces_warning(None):
zed["eyes"]["right"].fillna(value=555, inplace=True)
| xref https://github.com/pandas-dev/pandas/issues/56019
That's the stricter check we talked about today. The cases we exclude here are no-ops without CoW as well, so no need to warn | https://api.github.com/repos/pandas-dev/pandas/pulls/56211 | 2023-11-27T21:40:28Z | 2023-12-04T12:27:15Z | 2023-12-04T12:27:15Z | 2023-12-04T12:40:42Z |
TST/CLN: Remove makeDataFrame | diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py
index 98113b6c41821..d958c80fbcdef 100644
--- a/pandas/tests/frame/methods/test_set_index.py
+++ b/pandas/tests/frame/methods/test_set_index.py
@@ -155,7 +155,11 @@ def test_set_index(self, float_string_frame):
df.set_index(idx[::2])
def test_set_index_names(self):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ np.ones((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(10)], dtype=object),
+ )
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 8083795a69413..7fd795dc84cca 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1566,7 +1566,10 @@ def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne)
f(df, 0)
def test_comparison_protected_from_errstate(self):
- missing_df = tm.makeDataFrame()
+ missing_df = DataFrame(
+ np.ones((10, 4), dtype=np.float64),
+ columns=Index(list("ABCD"), dtype=object),
+ )
missing_df.loc[missing_df.index[0], "A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index bf0975a803dce..73ac9a3bb1a44 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -12,6 +12,7 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
Series,
Timestamp,
date_range,
@@ -627,7 +628,10 @@ def test_chained_getitem_with_lists(self):
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
- df = tm.makeDataFrame()
+ df = DataFrame(
+ np.zeros((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ )
df["A"] # cache series
df.loc["Hello Friend"] = df.iloc[0]
assert "Hello Friend" in df["A"].index
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 507d7ed4bf9d0..9aeac58de50bb 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1234,7 +1234,11 @@ def test_freeze_panes(self, path):
tm.assert_frame_equal(result, expected)
def test_path_path_lib(self, engine, ext):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
@@ -1242,7 +1246,11 @@ def test_path_path_lib(self, engine, ext):
tm.assert_frame_equal(result, df)
def test_path_local_path(self, engine, ext):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index 613f609320f31..33b9fe9b665f3 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
compat,
)
import pandas._testing as tm
@@ -665,7 +666,7 @@ def test_na_rep_truncated(self):
def test_to_csv_errors(self, errors):
# GH 22610
data = ["\ud800foo"]
- ser = pd.Series(data, index=pd.Index(data))
+ ser = pd.Series(data, index=Index(data))
with tm.ensure_clean("test.csv") as path:
ser.to_csv(path, errors=errors)
# No use in reading back the data as it is not the same anymore
@@ -679,7 +680,11 @@ def test_to_csv_binary_handle(self, mode):
GH 35058 and GH 19827
"""
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
with open(path, mode="w+b") as handle:
df.to_csv(handle, mode=mode)
@@ -713,7 +718,11 @@ def test_to_csv_encoding_binary_handle(self, mode):
def test_to_csv_iterative_compression_name(compression):
# GH 38714
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
df.to_csv(path, compression=compression, chunksize=1)
tm.assert_frame_equal(
@@ -723,7 +732,11 @@ def test_to_csv_iterative_compression_name(compression):
def test_to_csv_iterative_compression_buffer(compression):
# GH 38714
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with io.BytesIO() as buffer:
df.to_csv(buffer, compression=compression, chunksize=1)
buffer.seek(0)
diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
index c374795019ff4..a7a8d031da215 100644
--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -11,6 +11,7 @@
from urllib.error import URLError
import uuid
+import numpy as np
import pytest
from pandas.errors import (
@@ -19,7 +20,10 @@
)
import pandas.util._test_decorators as td
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ Index,
+)
import pandas._testing as tm
pytestmark = pytest.mark.filterwarnings(
@@ -66,7 +70,11 @@ def test_local_file(all_parsers, csv_dir_path):
@xfail_pyarrow # AssertionError: DataFrame.index are different
def test_path_path_lib(all_parsers):
parser = all_parsers
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
@@ -74,7 +82,11 @@ def test_path_path_lib(all_parsers):
@xfail_pyarrow # AssertionError: DataFrame.index are different
def test_path_local_path(all_parsers):
parser = all_parsers
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index 89a0162af1c54..9eb9ffa53dd22 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas import (
DataFrame,
+ Index,
Series,
_testing as tm,
concat,
@@ -401,7 +402,7 @@ def check_col(key, name, size):
{
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
- "C": pd.Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
"D": date_range("20130101", periods=5),
}
).set_index("C")
@@ -658,7 +659,11 @@ def test_append_hierarchical(tmp_path, setup_path, multiindex_dataframe_random_d
def test_append_misc(setup_path):
with ensure_clean_store(setup_path) as store:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
@@ -671,7 +676,11 @@ def test_append_misc(setup_path):
@pytest.mark.parametrize("chunksize", [10, 200, 1000])
def test_append_misc_chunksize(setup_path, chunksize):
# more chunksize in append tests
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
@@ -715,7 +724,11 @@ def test_append_raise(setup_path):
# test append with invalid input to get good error messages
# list in column
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
msg = re.escape(
@@ -732,7 +745,11 @@ def test_append_raise(setup_path):
store.append("df", df)
# datetime with embedded nans as object
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
@@ -756,7 +773,11 @@ def test_append_raise(setup_path):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
store.append("df", df)
df["foo"] = "foo"
diff --git a/pandas/tests/io/pytables/test_errors.py b/pandas/tests/io/pytables/test_errors.py
index adf42cc7e8d39..d956e4f5775eb 100644
--- a/pandas/tests/io/pytables/test_errors.py
+++ b/pandas/tests/io/pytables/test_errors.py
@@ -9,6 +9,7 @@
CategoricalIndex,
DataFrame,
HDFStore,
+ Index,
MultiIndex,
_testing as tm,
date_range,
@@ -25,7 +26,11 @@
def test_pass_spec_to_storer(setup_path):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with ensure_clean_store(setup_path) as store:
store.put("df", df)
@@ -60,14 +65,22 @@ def test_unimplemented_dtypes_table_columns(setup_path):
# currently not supported dtypes ####
for n, f in dtypes:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df[n] = f
msg = re.escape(f"[{n}] is not implemented as a table column")
with pytest.raises(TypeError, match=msg):
store.append(f"df1_{n}", df)
# frame
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index cb44512d4506c..2920f0b07b31e 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -17,6 +17,7 @@
from pandas import (
DataFrame,
HDFStore,
+ Index,
Series,
_testing as tm,
read_hdf,
@@ -145,7 +146,11 @@ def test_reopen_handle(tmp_path, setup_path):
def test_open_args(setup_path):
with tm.ensure_clean(setup_path) as path:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# create an in memory store
store = HDFStore(
@@ -172,7 +177,11 @@ def test_flush(setup_path):
def test_complibs_default_settings(tmp_path, setup_path):
# GH15943
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# Set complevel and check if complib is automatically set to
# default value
@@ -211,7 +220,11 @@ def test_complibs_default_settings(tmp_path, setup_path):
def test_complibs_default_settings_override(tmp_path, setup_path):
# Check if file-defaults can be overridden on a per table basis
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
tmpfile = tmp_path / setup_path
store = HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
@@ -325,7 +338,11 @@ def test_multiple_open_close(tmp_path, setup_path):
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_hdf(path, key="df", mode="w", format="table")
# single
@@ -402,7 +419,11 @@ def test_multiple_open_close(tmp_path, setup_path):
# ops on a closed store
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_hdf(path, key="df", mode="w", format="table")
store = HDFStore(path)
diff --git a/pandas/tests/io/pytables/test_keys.py b/pandas/tests/io/pytables/test_keys.py
index fd7df29595090..c0f2c34ff37ed 100644
--- a/pandas/tests/io/pytables/test_keys.py
+++ b/pandas/tests/io/pytables/test_keys.py
@@ -1,8 +1,10 @@
+import numpy as np
import pytest
from pandas import (
DataFrame,
HDFStore,
+ Index,
Series,
_testing as tm,
)
@@ -20,7 +22,11 @@ def test_keys(setup_path):
store["b"] = Series(
range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
)
- store["c"] = tm.makeDataFrame()
+ store["c"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
assert len(store) == 3
expected = {"/a", "/b", "/c"}
diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py
index 59a05dc9ea546..8a6e3c9006439 100644
--- a/pandas/tests/io/pytables/test_put.py
+++ b/pandas/tests/io/pytables/test_put.py
@@ -47,7 +47,11 @@ def test_format_kwarg_in_constructor(tmp_path, setup_path):
def test_api_default_format(tmp_path, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with pd.option_context("io.hdf.default_format", "fixed"):
_maybe_remove(store, "df")
@@ -68,7 +72,11 @@ def test_api_default_format(tmp_path, setup_path):
assert store.get_storer("df").is_table
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with pd.option_context("io.hdf.default_format", "fixed"):
df.to_hdf(path, key="df")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index e05e1e96eb11f..693f10172a99e 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -39,7 +39,11 @@ def roundtrip(key, obj, **kwargs):
o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
tm.assert_series_equal(o, roundtrip("string_series", o))
- o = tm.makeDataFrame()
+ o = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
@@ -136,7 +140,11 @@ def test_api_2(tmp_path, setup_path):
def test_api_invalid(tmp_path, setup_path):
path = tmp_path / setup_path
# Invalid.
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
msg = "Can only append to Tables"
@@ -347,7 +355,11 @@ def test_timeseries_preepoch(setup_path, request):
"compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_frame(compression, setup_path):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# put in some random NAs
df.iloc[0, 0] = np.nan
@@ -424,7 +436,11 @@ def test_store_hierarchical(setup_path, multiindex_dataframe_random_data):
)
def test_store_mixed(compression, setup_path):
def _make_one():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py
index e387b1b607f63..3eaa1e86dbf6d 100644
--- a/pandas/tests/io/pytables/test_select.py
+++ b/pandas/tests/io/pytables/test_select.py
@@ -266,7 +266,11 @@ def test_select_dtypes(setup_path):
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
expected = df[df["A"] > 0]
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 7e8365a8f9ffa..98257f1765d53 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -45,7 +45,11 @@ def test_context(setup_path):
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
- tbl["a"] = tm.makeDataFrame()
+ tbl["a"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
@@ -107,9 +111,17 @@ def test_repr(setup_path):
store["b"] = Series(
range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
)
- store["c"] = tm.makeDataFrame()
+ store["c"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
@@ -136,7 +148,11 @@ def test_repr(setup_path):
# storers
with ensure_clean_store(setup_path) as store:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
store.append("df", df)
s = store.get_storer("df")
@@ -147,8 +163,16 @@ def test_repr(setup_path):
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
- store["b"] = tm.makeDataFrame()
- store["foo/bar"] = tm.makeDataFrame()
+ store["b"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ store["foo/bar"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
assert "a" in store
assert "b" in store
assert "c" not in store
@@ -161,14 +185,22 @@ def test_contains(setup_path):
with tm.assert_produces_warning(
tables.NaturalNameWarning, check_stacklevel=False
):
- store["node())"] = tm.makeDataFrame()
+ store["node())"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
- store["b"] = tm.makeDataFrame()
+ store["b"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
@@ -432,7 +464,11 @@ def test_mi_data_columns(setup_path):
def test_table_mixed_dtypes(setup_path):
# frame
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
@@ -482,7 +518,11 @@ def test_calendar_roundtrip_issue(setup_path):
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
@@ -542,7 +582,11 @@ def test_same_name_scoping(setup_path):
def test_store_index_name(setup_path):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
@@ -581,7 +625,11 @@ def test_store_index_name_numpy_str(tmp_path, table_format, setup_path, unit, tz
def test_store_series_name(setup_path):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
series = df["A"]
with ensure_clean_store(setup_path) as store:
@@ -783,7 +831,11 @@ def test_start_stop_fixed(setup_path):
tm.assert_series_equal(result, expected)
# sparse; not implemented
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
@@ -806,7 +858,11 @@ def test_select_filter_corner(setup_path):
def test_path_pathlib():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df")
@@ -832,7 +888,11 @@ def test_contiguous_mixed_data_table(start, stop, setup_path):
def test_path_pathlib_hdfstore():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
def writer(path):
with HDFStore(path) as store:
@@ -847,7 +907,11 @@ def reader(path):
def test_pickle_path_localpath():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df")
)
@@ -855,7 +919,11 @@ def test_pickle_path_localpath():
def test_path_localpath_hdfstore():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
def writer(path):
with HDFStore(path) as store:
@@ -871,7 +939,11 @@ def reader(path):
@pytest.mark.parametrize("propindexes", [True, False])
def test_copy(propindexes):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
with HDFStore(path) as st:
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 26035d1af7f90..718f967f2f3d8 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -15,6 +15,7 @@
import pickle
import tempfile
+import numpy as np
import pytest
from pandas.compat import is_platform_windows
@@ -442,7 +443,11 @@ def test_next(self, mmap_file):
def test_unknown_engine(self):
with tm.ensure_clean() as path:
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_csv(path)
with pytest.raises(ValueError, match="Unknown engine"):
pd.read_csv(path, engine="pyt")
@@ -454,7 +459,11 @@ def test_binary_mode(self):
GH 35058
"""
with tm.ensure_clean() as path:
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_csv(path, mode="w+b")
tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
@@ -468,7 +477,11 @@ def test_warning_missing_utf_bom(self, encoding, compression_):
GH 35681
"""
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
with tm.assert_produces_warning(UnicodeWarning):
df.to_csv(path, compression=compression_, encoding=encoding)
@@ -498,7 +511,11 @@ def test_is_fsspec_url():
@pytest.mark.parametrize("format", ["csv", "json"])
def test_codecs_encoding(encoding, format):
# GH39247
- expected = tm.makeDataFrame()
+ expected = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
with codecs.open(path, mode="w", encoding=encoding) as handle:
getattr(expected, f"to_{format}")(handle)
@@ -512,7 +529,11 @@ def test_codecs_encoding(encoding, format):
def test_codecs_get_writer_reader():
# GH39247
- expected = tm.makeDataFrame()
+ expected = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with tm.ensure_clean() as path:
with open(path, "wb") as handle:
with codecs.getwriter("utf-8")(handle) as encoded:
@@ -534,7 +555,11 @@ def test_explicit_encoding(io_class, mode, msg):
# GH39247; this test makes sure that if a user provides mode="*t" or "*b",
# it is used. In the case of this test it leads to an error as intentionally the
# wrong mode is requested
- expected = tm.makeDataFrame()
+ expected = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with io_class() as buffer:
with pytest.raises(TypeError, match=msg):
expected.to_csv(buffer, mode=f"w{mode}")
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index af83ec4a55fa5..3a58dda9e8dc4 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -9,6 +9,7 @@
import time
import zipfile
+import numpy as np
import pytest
from pandas.compat import is_platform_windows
@@ -142,7 +143,11 @@ def test_compression_binary(compression_only):
GH22555
"""
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# with a file
with tm.ensure_clean() as path:
@@ -170,7 +175,11 @@ def test_gzip_reproducibility_file_name():
GH 28103
"""
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
compression_options = {"method": "gzip", "mtime": 1}
# test for filename
@@ -189,7 +198,11 @@ def test_gzip_reproducibility_file_object():
GH 28103
"""
- df = tm.makeDataFrame()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
compression_options = {"method": "gzip", "mtime": 1}
# test for file object
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 5ec8705251d95..572abbf7c48f7 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -132,17 +132,29 @@ def test_rw_use_threads(self):
self.check_round_trip(df, use_threads=False)
def test_path_pathlib(self):
- df = tm.makeDataFrame().reset_index()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ ).reset_index()
result = tm.round_trip_pathlib(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
- df = tm.makeDataFrame().reset_index()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ ).reset_index()
result = tm.round_trip_localpath(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
def test_passthrough_keywords(self):
- df = tm.makeDataFrame().reset_index()
+ df = pd.DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ ).reset_index()
self.check_round_trip(df, write_kwargs={"version": 1})
@pytest.mark.network
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 6e55cde12f2f9..0ce6a8bf82cd8 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -9,6 +9,7 @@
from pandas import (
DataFrame,
+ Index,
date_range,
read_csv,
read_excel,
@@ -145,7 +146,11 @@ def test_to_csv_compression_encoding_gcs(
GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and
GH 32392 (read_csv, encoding)
"""
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# reference of compressed and encoded file
compression = {"method": compression_only}
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 780b25fd0f346..e1fac21094139 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -41,6 +41,7 @@
import pandas as pd
from pandas import (
+ DataFrame,
Index,
Series,
period_range,
@@ -220,13 +221,21 @@ def test_round_trip_current(typ, expected, pickle_writer, writer):
def test_pickle_path_pathlib():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath():
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
result = tm.round_trip_localpath(df.to_pickle, pd.read_pickle)
tm.assert_frame_equal(df, result)
@@ -280,7 +289,11 @@ def test_write_explicit(self, compression, get_random_path):
path2 = base + ".raw"
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# write to compressed file
df.to_pickle(p1, compression=compression)
@@ -299,7 +312,11 @@ def test_write_explicit(self, compression, get_random_path):
def test_write_explicit_bad(self, compression, get_random_path):
with pytest.raises(ValueError, match="Unrecognized compression type"):
with tm.ensure_clean(get_random_path) as path:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_pickle(path, compression=compression)
def test_write_infer(self, compression_ext, get_random_path):
@@ -309,7 +326,11 @@ def test_write_infer(self, compression_ext, get_random_path):
compression = self._extension_to_compression.get(compression_ext.lower())
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# write to compressed file by inferred compression method
df.to_pickle(p1)
@@ -330,7 +351,11 @@ def test_read_explicit(self, compression, get_random_path):
path2 = base + ".compressed"
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# write to uncompressed file
df.to_pickle(p1, compression=None)
@@ -349,7 +374,11 @@ def test_read_infer(self, compression_ext, get_random_path):
compression = self._extension_to_compression.get(compression_ext.lower())
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# write to uncompressed file
df.to_pickle(p1, compression=None)
@@ -371,7 +400,11 @@ class TestProtocol:
@pytest.mark.parametrize("protocol", [-1, 0, 1, 2])
def test_read(self, protocol, get_random_path):
with tm.ensure_clean(get_random_path) as path:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_pickle(path, protocol=protocol)
df2 = pd.read_pickle(path)
tm.assert_frame_equal(df, df2)
@@ -404,7 +437,11 @@ def test_unicode_decode_error(datapath, pickle_file, excols):
def test_pickle_buffer_roundtrip():
with tm.ensure_clean() as path:
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
with open(path, "wb") as fh:
df.to_pickle(fh)
with open(path, "rb") as fh:
@@ -450,7 +487,11 @@ def close(self):
def mock_urlopen_read(*args, **kwargs):
return MockReadResponse(path)
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
python_pickler(df, path)
monkeypatch.setattr("urllib.request.urlopen", mock_urlopen_read)
result = pd.read_pickle(mockurl)
@@ -461,7 +502,11 @@ def test_pickle_fsspec_roundtrip():
pytest.importorskip("fsspec")
with tm.ensure_clean():
mockurl = "memory://mockfile"
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.to_pickle(mockurl)
result = pd.read_pickle(mockurl)
tm.assert_frame_equal(df, result)
@@ -487,7 +532,11 @@ def test_pickle_binary_object_compression(compression):
GH 26237, GH 29054, and GH 29570
"""
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
# reference for compression
with tm.ensure_clean() as path:
@@ -567,7 +616,7 @@ def test_pickle_preserves_block_ndim():
@pytest.mark.parametrize("protocol", [pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL])
def test_pickle_big_dataframe_compression(protocol, compression):
# GH#39002
- df = pd.DataFrame(range(100000))
+ df = DataFrame(range(100000))
result = tm.round_trip_pathlib(
partial(df.to_pickle, protocol=protocol, compression=compression),
partial(pd.read_pickle, compression=compression),
@@ -587,13 +636,13 @@ def test_pickle_frame_v124_unpickle_130(datapath):
with open(path, "rb") as fd:
df = pickle.load(fd)
- expected = pd.DataFrame(index=[], columns=[])
+ expected = DataFrame(index=[], columns=[])
tm.assert_frame_equal(df, expected)
def test_pickle_pos_args_deprecation():
# GH-54229
- df = pd.DataFrame({"a": [1, 2, 3]})
+ df = DataFrame({"a": [1, 2, 3]})
msg = (
r"Starting with pandas version 3.0 all arguments of to_pickle except for the "
r"argument 'path' will be keyword-only."
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 82e6c2964b8c5..19d81d50f5774 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1549,14 +1549,22 @@ def test_inf(self, infval):
df.to_stata(path)
def test_path_pathlib(self):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_pathlib(df.to_stata, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_localpath(df.to_stata, reader)
@@ -1577,7 +1585,11 @@ def test_value_labels_iterator(self, write_index):
def test_set_index(self):
# GH 17328
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(path)
@@ -1714,7 +1726,11 @@ def test_invalid_date_conversion(self):
def test_nonfile_writing(self, version):
# GH 21041
bio = io.BytesIO()
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(bio, version=version)
@@ -1726,7 +1742,11 @@ def test_nonfile_writing(self, version):
def test_gzip_writing(self):
# writing version 117 requires seek and cannot be used with gzip
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=pd.Index(list("ABCD"), dtype=object),
+ index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
df.index.name = "index"
with tm.ensure_clean() as path:
with gzip.GzipFile(path, "wb") as gz:
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index bfb9a5a9a1647..3195b7637ee3c 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1314,7 +1314,11 @@ def test_secondary_legend_multi_col(self):
def test_secondary_legend_nonts(self):
# non-ts
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
fig = mpl.pyplot.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=["A", "B"], ax=ax)
@@ -1331,7 +1335,11 @@ def test_secondary_legend_nonts(self):
def test_secondary_legend_nonts_multi_col(self):
# non-ts
- df = tm.makeDataFrame()
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
fig = mpl.pyplot.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=["C", "D"], ax=ax)
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index 9e6b4ce0df1d6..634b8699a89e6 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -33,7 +33,11 @@ def test_reset_index_dti_round_trip(self):
assert df.reset_index()["Date"].iloc[0] == stamp
def test_reset_index(self):
- df = tm.makeDataFrame()[:5]
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )[:5]
ser = df.stack(future_stack=True)
ser.index.names = ["hash", "category"]
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56210 | 2023-11-27T20:13:13Z | 2023-11-28T17:08:38Z | 2023-11-28T17:08:38Z | 2024-01-22T18:38:11Z |
Update indexing unpacking logic for single block case | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8bc30cb5f0ddc..e3928621a4e48 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2140,6 +2140,12 @@ def _setitem_single_block(self, indexer, value, name: str) -> None:
"""
from pandas import Series
+ if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
+ # TODO(EA): ExtensionBlock.setitem this causes issues with
+ # setting for extensionarrays that store dicts. Need to decide
+ # if it's worth supporting that.
+ value = self._align_series(indexer, Series(value))
+
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
if isinstance(indexer, tuple):
@@ -2160,13 +2166,7 @@ def _setitem_single_block(self, indexer, value, name: str) -> None:
indexer = maybe_convert_ix(*indexer) # e.g. test_setitem_frame_align
- if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
- # TODO(EA): ExtensionBlock.setitem this causes issues with
- # setting for extensionarrays that store dicts. Need to decide
- # if it's worth supporting that.
- value = self._align_series(indexer, Series(value))
-
- elif isinstance(value, ABCDataFrame) and name != "iloc":
+ if isinstance(value, ABCDataFrame) and name != "iloc":
value = self._align_frame(indexer, value)._values
# check for chained assignment
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56209 | 2023-11-27T20:12:43Z | 2023-11-29T18:02:02Z | 2023-11-29T18:02:02Z | 2023-11-29T18:06:02Z |
BUG: inferred resolution with ISO8601 and tzoffset | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index fbff38aeefc51..70d43b852945d 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -457,6 +457,7 @@ Datetimelike
- Bug in :meth:`Index.view` to a datetime64 dtype with non-supported resolution incorrectly raising (:issue:`55710`)
- Bug in :meth:`Series.dt.round` with non-nanosecond resolution and ``NaT`` entries incorrectly raising ``OverflowError`` (:issue:`56158`)
- Bug in :meth:`Tick.delta` with very large ticks raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`)
+- Bug in :meth:`Timestamp.unit` being inferred incorrectly from an ISO8601 format string with minute or hour resolution and a timezone offset (:issue:`56208`)
- Bug in ``.astype`` converting from a higher-resolution ``datetime64`` dtype to a lower-resolution ``datetime64`` dtype (e.g. ``datetime64[us]->datetim64[ms]``) silently overflowing with values near the lower implementation bound (:issue:`55979`)
- Bug in adding or subtracting a :class:`Week` offset to a ``datetime64`` :class:`Series`, :class:`Index`, or :class:`DataFrame` column with non-nanosecond resolution returning incorrect results (:issue:`55583`)
- Bug in addition or subtraction of :class:`BusinessDay` offset with ``offset`` attribute to non-nanosecond :class:`Index`, :class:`Series`, or :class:`DataFrame` column giving incorrect results (:issue:`55608`)
diff --git a/pandas/_libs/src/vendored/numpy/datetime/np_datetime_strings.c b/pandas/_libs/src/vendored/numpy/datetime/np_datetime_strings.c
index a0d56efc14bd9..3a9a805a9ec45 100644
--- a/pandas/_libs/src/vendored/numpy/datetime/np_datetime_strings.c
+++ b/pandas/_libs/src/vendored/numpy/datetime/np_datetime_strings.c
@@ -364,6 +364,7 @@ int parse_iso_8601_datetime(const char *str, int len, int want_exc,
goto parse_error;
}
out->hour = (*substr - '0');
+ bestunit = NPY_FR_h;
++substr;
--sublen;
/* Second digit optional */
@@ -425,6 +426,7 @@ int parse_iso_8601_datetime(const char *str, int len, int want_exc,
}
/* First digit required */
out->min = (*substr - '0');
+ bestunit = NPY_FR_m;
++substr;
--sublen;
/* Second digit optional if there was a separator */
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 0201e5d9af2ee..91314a497b1fb 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -457,6 +457,13 @@ def test_constructor_str_infer_reso(self):
assert ts == Timestamp("01-01-2013T00:00:00.000000002+0000")
assert ts.unit == "ns"
+ # GH#56208 minute reso through the ISO8601 path with tz offset
+ ts = Timestamp("2020-01-01 00:00+00:00")
+ assert ts.unit == "s"
+
+ ts = Timestamp("2020-01-01 00+00:00")
+ assert ts.unit == "s"
+
class TestTimestampConstructors:
def test_weekday_but_no_day_raises(self):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56208 | 2023-11-27T17:54:04Z | 2023-11-27T21:17:14Z | 2023-11-27T21:17:14Z | 2023-11-27T22:03:04Z |
WEB: Better management of releases | diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index af452363666b5..4e88bfff69327 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -46,6 +46,9 @@ jobs:
- name: Build Pandas
uses: ./.github/actions/build_pandas
+ - name: Test website
+ run: python -m pytest web/
+
- name: Build website
run: python web/pandas_web.py web/pandas --target-path=web/build
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 1cd3be456bfe0..58b5c287791c1 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -27,6 +27,7 @@
import collections
import datetime
import importlib
+import itertools
import json
import operator
import os
@@ -40,6 +41,7 @@
import feedparser
import jinja2
import markdown
+from packaging import version
import requests
import yaml
@@ -240,6 +242,7 @@ def home_add_releases(context):
context["releases"].append(
{
"name": release["tag_name"].lstrip("v"),
+ "parsed_version": version.parse(release["tag_name"].lstrip("v")),
"tag": release["tag_name"],
"published": published,
"url": (
@@ -249,7 +252,17 @@ def home_add_releases(context):
),
}
)
-
+ # sorting out obsolete versions
+ grouped_releases = itertools.groupby(
+ context["releases"],
+ key=lambda r: (r["parsed_version"].major, r["parsed_version"].minor),
+ )
+ context["releases"] = [
+ max(release_group, key=lambda r: r["parsed_version"].minor)
+ for _, release_group in grouped_releases
+ ]
+ # sorting releases by version number
+ context["releases"].sort(key=lambda r: r["parsed_version"], reverse=True)
return context
@staticmethod
diff --git a/web/tests/test_pandas_web.py b/web/tests/test_pandas_web.py
new file mode 100644
index 0000000000000..827c1d4dbea40
--- /dev/null
+++ b/web/tests/test_pandas_web.py
@@ -0,0 +1,88 @@
+from unittest.mock import (
+ mock_open,
+ patch,
+)
+
+import pytest
+import requests
+
+from web.pandas_web import Preprocessors
+
+
+class MockResponse:
+ def __init__(self, status_code: int, response: dict):
+ self.status_code = status_code
+ self._resp = response
+
+ def json(self):
+ return self._resp
+
+ @staticmethod
+ def raise_for_status():
+ return
+
+
+@pytest.fixture
+def context() -> dict:
+ return {
+ "main": {"github_repo_url": "pandas-dev/pandas"},
+ "target_path": "test_target_path",
+ }
+
+
+@pytest.fixture(scope="function")
+def mock_response(monkeypatch, request):
+ def mocked_resp(*args, **kwargs):
+ status_code, response = request.param
+ return MockResponse(status_code, response)
+
+ monkeypatch.setattr(requests, "get", mocked_resp)
+
+
+_releases_list = [
+ {
+ "prerelease": False,
+ "published_at": "2024-01-19T03:34:05Z",
+ "tag_name": "v1.5.6",
+ "assets": None,
+ },
+ {
+ "prerelease": False,
+ "published_at": "2023-11-10T19:07:37Z",
+ "tag_name": "v2.1.3",
+ "assets": None,
+ },
+ {
+ "prerelease": False,
+ "published_at": "2023-08-30T13:24:32Z",
+ "tag_name": "v2.1.0",
+ "assets": None,
+ },
+ {
+ "prerelease": False,
+ "published_at": "2023-04-30T13:24:32Z",
+ "tag_name": "v2.0.0",
+ "assets": None,
+ },
+ {
+ "prerelease": True,
+ "published_at": "2023-01-19T03:34:05Z",
+ "tag_name": "v1.5.3xd",
+ "assets": None,
+ },
+ {
+ "prerelease": False,
+ "published_at": "2027-01-19T03:34:05Z",
+ "tag_name": "v10.0.1",
+ "assets": None,
+ },
+]
+
+
+@pytest.mark.parametrize("mock_response", [(200, _releases_list)], indirect=True)
+def test_web_preprocessor_creates_releases(mock_response, context):
+ m = mock_open()
+ with patch("builtins.open", m):
+ context = Preprocessors.home_add_releases(context)
+ release_versions = [release["name"] for release in context["releases"]]
+ assert release_versions == ["10.0.1", "2.1.3", "2.0.0", "1.5.6"]
| Added releases sorting by version number and sorting out obsolete releases in web/pandas_web.py
- [ ] closes #50885
- [x] Tests added and passed
- [x] All code checks passed
| https://api.github.com/repos/pandas-dev/pandas/pulls/56207 | 2023-11-27T17:47:50Z | 2023-12-13T05:22:55Z | 2023-12-13T05:22:55Z | 2023-12-13T05:23:24Z |
Simply code by using % formatter | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 39d5b45862a8f..ab08f4580cf5b 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -4002,8 +4002,8 @@ def css_bar(start: float, end: float, color: str) -> str:
if end > start:
cell_css += "background: linear-gradient(90deg,"
if start > 0:
- cell_css += f" transparent {start*100:.1f}%, {color} {start*100:.1f}%,"
- cell_css += f" {color} {end*100:.1f}%, transparent {end*100:.1f}%)"
+ cell_css += f" transparent {start:.1%}, {color} {start:.1%},"
+ cell_css += f" {color} {end:.1%}, transparent {end:.1%})"
return cell_css
def css_calc(x, left: float, right: float, align: str, color: str | list | tuple):
@@ -4076,7 +4076,7 @@ def css_calc(x, left: float, right: float, align: str, color: str | list | tuple
ret = css_bar(start * width, end * width, color)
if height < 1 and "background: linear-gradient(" in ret:
return (
- ret + f" no-repeat center; background-size: 100% {height * 100:.1f}%;"
+ ret + f" no-repeat center; background-size: 100% {height:.1%};"
)
else:
return ret
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56206 | 2023-11-27T16:42:31Z | 2023-11-27T17:03:56Z | null | 2023-11-27T17:03:56Z |
CI: Fix ci | diff --git a/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
index 37431f39bdaa0..55781370a5a59 100644
--- a/pandas/tests/copy_view/test_chained_assignment_deprecation.py
+++ b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
@@ -36,7 +36,7 @@ def test_methods_iloc_warn(using_copy_on_write):
def test_methods_iloc_getitem_item_cache(func, args, using_copy_on_write):
df = DataFrame({"a": [1, 2, 3], "b": 1})
ser = df.iloc[:, 0]
- TODO(CoW-warn) should warn about updating a view
+ # TODO(CoW-warn) should warn about updating a view
getattr(ser, func)(*args, inplace=True)
# parent that holds item_cache is dead, so don't increase ref count
| cc @jorisvandenbossche
merging this to get ci green again | https://api.github.com/repos/pandas-dev/pandas/pulls/56205 | 2023-11-27T14:25:44Z | 2023-11-27T14:26:05Z | 2023-11-27T14:26:05Z | 2023-11-27T14:34:52Z |
TST/CLN: Remove makeMixedDataFrame and getMixedTypeDict | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index c73d869b6c39c..14ee29d24800e 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -482,23 +482,6 @@ def makeDataFrame() -> DataFrame:
return DataFrame(data)
-def getMixedTypeDict():
- index = Index(["a", "b", "c", "d", "e"])
-
- data = {
- "A": [0.0, 1.0, 2.0, 3.0, 4.0],
- "B": [0.0, 1.0, 0.0, 1.0, 0.0],
- "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
- "D": bdate_range("1/1/2009", periods=5),
- }
-
- return index, data
-
-
-def makeMixedDataFrame() -> DataFrame:
- return DataFrame(getMixedTypeDict()[1])
-
-
def makeCustomIndex(
nentries,
nlevels,
@@ -1026,7 +1009,6 @@ def shares_memory(left, right) -> bool:
"get_dtype",
"getitem",
"get_locales",
- "getMixedTypeDict",
"get_finest_unit",
"get_obj",
"get_op_from_name",
@@ -1042,7 +1024,6 @@ def shares_memory(left, right) -> bool:
"makeDateIndex",
"makeFloatIndex",
"makeIntIndex",
- "makeMixedDataFrame",
"makeNumericIndex",
"makeObjectSeries",
"makePeriodIndex",
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
index 93a2f8d80019c..d0caa071fae1c 100644
--- a/pandas/tests/frame/methods/test_transpose.py
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -6,9 +6,11 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ Index,
IntervalIndex,
Series,
Timestamp,
+ bdate_range,
date_range,
timedelta_range,
)
@@ -108,9 +110,17 @@ def test_transpose_float(self, float_frame):
else:
assert value == frame[col][idx]
+ def test_transpose_mixed(self):
# mixed type
- index, data = tm.getMixedTypeDict()
- mixed = DataFrame(data, index=index)
+ mixed = DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
+ "D": bdate_range("1/1/2009", periods=5),
+ },
+ index=Index(["a", "b", "c", "d", "e"], dtype=object),
+ )
mixed_T = mixed.T
for col, s in mixed_T.items():
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index 50cf7d737eb99..89a0162af1c54 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -397,7 +397,14 @@ def check_col(key, name, size):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
- df = tm.makeMixedDataFrame().set_index("C")
+ df = DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": pd.Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": date_range("20130101", periods=5),
+ }
+ ).set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index df8a1e3cb7470..5b16ea9ee6b09 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -323,7 +323,14 @@ def test_to_hdf_with_min_itemsize(tmp_path, setup_path):
path = tmp_path / setup_path
# min_itemsize in index with to_hdf (GH 10381)
- df = tm.makeMixedDataFrame().set_index("C")
+ df = DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": date_range("20130101", periods=5),
+ }
+ ).set_index("C")
df.to_hdf(path, key="ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index c4c83e2046b76..f07c223bf0de2 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -11,6 +11,7 @@
MultiIndex,
Series,
Timestamp,
+ bdate_range,
concat,
merge,
)
@@ -57,8 +58,13 @@ def df2(self):
@pytest.fixture
def target_source(self):
- index, data = tm.getMixedTypeDict()
- target = DataFrame(data, index=index)
+ data = {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
+ "D": bdate_range("1/1/2009", periods=5),
+ }
+ target = DataFrame(data, index=Index(["a", "b", "c", "d", "e"], dtype=object))
# Join on string value
diff --git a/pandas/tests/series/methods/test_map.py b/pandas/tests/series/methods/test_map.py
index f86f6069a2ef3..7b14e1289abf0 100644
--- a/pandas/tests/series/methods/test_map.py
+++ b/pandas/tests/series/methods/test_map.py
@@ -14,6 +14,7 @@
Index,
MultiIndex,
Series,
+ bdate_range,
isna,
timedelta_range,
)
@@ -154,8 +155,13 @@ def test_list_raises(string_series):
string_series.map([lambda x: x])
-def test_map(datetime_series):
- index, data = tm.getMixedTypeDict()
+def test_map():
+ data = {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
+ "D": bdate_range("1/1/2009", periods=5),
+ }
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
@@ -171,10 +177,14 @@ def test_map(datetime_series):
for k, v in merged.items():
assert v == source[target[k]]
+
+def test_map_datetime(datetime_series):
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
+
+def test_map_category():
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
@@ -185,6 +195,8 @@ def test_map(datetime_series):
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
+
+def test_map_category_numeric():
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
@@ -194,6 +206,8 @@ def test_map(datetime_series):
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
+
+def test_map_category_string():
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index e7c4c27714d5f..5f1d905aa4a46 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -136,7 +136,14 @@ def test_multiindex_objects():
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
DataFrame(),
DataFrame(np.full((10, 4), np.nan)),
- tm.makeMixedDataFrame(),
+ DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": pd.date_range("20130101", periods=5),
+ }
+ ),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
Series(tm.makePeriodIndex()),
@@ -162,7 +169,14 @@ def test_hash_pandas_object(obj, index):
Series([True, False, True]),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
DataFrame(np.full((10, 4), np.nan)),
- tm.makeMixedDataFrame(),
+ DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": pd.date_range("20130101", periods=5),
+ }
+ ),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
Series(tm.makePeriodIndex()),
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56202 | 2023-11-27T03:42:20Z | 2023-11-27T11:15:40Z | 2023-11-27T11:15:40Z | 2024-01-30T21:09:31Z |
DOC: Add clarifications to docs for setting up a development environment | diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 7fc42f6021f00..325c902dd4f9e 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -44,8 +44,9 @@ and consult the ``Linux`` instructions below.
**macOS**
To use the :ref:`mamba <contributing.mamba>`-based compilers, you will need to install the
-Developer Tools using ``xcode-select --install``. Otherwise
-information about compiler installation can be found here:
+Developer Tools using ``xcode-select --install``.
+
+If you prefer to use a different compiler, general information can be found here:
https://devguide.python.org/setup/#macos
**Linux**
@@ -86,12 +87,12 @@ Before we begin, please:
Option 1: using mamba (recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* Install `mamba <https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html>`_
+* Install miniforge to get `mamba <https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html>`_
* Make sure your mamba is up to date (``mamba update mamba``)
+* Create and activate the ``pandas-dev`` mamba environment using the following commands:
.. code-block:: none
- # Create and activate the build environment
mamba env create --file environment.yml
mamba activate pandas-dev
@@ -273,6 +274,8 @@ uses to import the extension from the build folder, which may cause errors such
You will need to repeat this step each time the C extensions change, for example
if you modified any file in ``pandas/_libs`` or if you did a fetch and merge from ``upstream/main``.
+**Checking the build**
+
At this point you should be able to import pandas from your locally built version::
$ python
@@ -280,6 +283,12 @@ At this point you should be able to import pandas from your locally built versio
>>> print(pandas.__version__) # note: the exact output may differ
2.0.0.dev0+880.g2b9e661fbb.dirty
+
+At this point you may want to try
+`running the test suite <https://pandas.pydata.org/docs/dev/development/contributing_codebase.html#running-the-test-suite>`_.
+
+**Keeping up to date with the latest build**
+
When building pandas with meson, importing pandas will automatically trigger a rebuild, even when C/Cython files are modified.
By default, no output will be produced by this rebuild (the import will just take longer). If you would like to see meson's
output when importing pandas, you can set the environment variable ``MESONPY_EDTIABLE_VERBOSE``. For example, this would be::
| Hi all - having just been through the setup process (Mac, mamba) and as a non-expert I thought I'd feed back some things that would have made my experience faster and smoother.
---
**Change 1**
- It wasn't 100% clear to me that I did **_not_** need to use any information from the link `https://devguide.python.org/setup/#macos` (it is clear with hindsight, but it's the initial impression that I'm trying to improve here):
Before this change:
<kbd><img width="750" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/c72c8c93-4b70-4918-bdfe-557c859d40a7"></kbd>
After:
<kbd><img width="683" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/8025886f-4d85-4987-b29b-398897662035"></kbd>
---
**Change 2**
- I got confused by `mamba` vs `conda`, and that you actually need to install is called `miniforge` - I had a moment of doubt here that I was on the wrong track.
Before this change:
<kbd><img width="617" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/bd87599c-30a0-4f88-8a11-087cb8824dbf"></kbd>
<kbd><img width="729" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/123675ee-e30e-4385-99b3-5c085613de99"></kbd>
---
**Change 3**
- I found that I had some old packages that were used in my mamba environment (probably from a previous install of mamba - last I played with this was about a year ago). Looking into them, they were packages like `sphinx` that did not have a version pinned in `environment.yml`. This feels like it could be a source of errors for people getting set up.
After:
<kbd><img width="769" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/395165e2-3b39-4a5a-9f7f-d377319a2453"></kbd>
---
**Change 4**
- Sounds strange, but I wasn't sure when I was actually 'done' with getting set up.
- I tend to like to run unit tests when I get set up with a new repo.
- I didn't realize that with this setup some of the unit tests (the ones with external dependencies) were expected to fail, as helpfully documented [here](https://pandas.pydata.org/docs/dev/development/contributing_codebase.html#running-the-test-suite). I feel this is really important - I lost a lot of time because I assumed there must be something wrong with my install because unit tests were failing. By adding the link here hopefully others see this information sooner.
Before this change:
<kbd><img width="776" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/7e689a9d-86db-4a28-96f5-46322f33c417"></kbd>
After:
<kbd><img width="791" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/079ce0c7-bc8b-4359-a03e-fa4e0f35c71e"></kbd>
---
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56201 | 2023-11-27T03:29:52Z | 2023-11-28T15:34:39Z | 2023-11-28T15:34:39Z | 2023-11-28T15:34:47Z |
TST/CLN: Remove makeUInt/Timedelta/RangeIndex | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 14ee29d24800e..d30929d07245b 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -47,6 +47,7 @@
RangeIndex,
Series,
bdate_range,
+ timedelta_range,
)
from pandas._testing._io import (
round_trip_localpath,
@@ -111,10 +112,7 @@
NpDtype,
)
- from pandas import (
- PeriodIndex,
- TimedeltaIndex,
- )
+ from pandas import PeriodIndex
from pandas.core.arrays import ArrowExtensionArray
_N = 30
@@ -405,17 +403,6 @@ def makeIntIndex(k: int = 10, *, name=None, dtype: Dtype = "int64") -> Index:
return makeNumericIndex(k, name=name, dtype=dtype)
-def makeUIntIndex(k: int = 10, *, name=None, dtype: Dtype = "uint64") -> Index:
- dtype = pandas_dtype(dtype)
- if not is_unsigned_integer_dtype(dtype):
- raise TypeError(f"Wrong dtype {dtype}")
- return makeNumericIndex(k, name=name, dtype=dtype)
-
-
-def makeRangeIndex(k: int = 10, name=None, **kwargs) -> RangeIndex:
- return RangeIndex(0, k, 1, name=name, **kwargs)
-
-
def makeFloatIndex(k: int = 10, *, name=None, dtype: Dtype = "float64") -> Index:
dtype = pandas_dtype(dtype)
if not is_float_dtype(dtype):
@@ -431,12 +418,6 @@ def makeDateIndex(
return DatetimeIndex(dr, name=name, **kwargs)
-def makeTimedeltaIndex(
- k: int = 10, freq: Frequency = "D", name=None, **kwargs
-) -> TimedeltaIndex:
- return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
-
-
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
pi = pd.period_range(start=dt, periods=k, freq="D", name=name, **kwargs)
@@ -537,7 +518,7 @@ def makeCustomIndex(
"f": makeFloatIndex,
"s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
"dt": makeDateIndex,
- "td": makeTimedeltaIndex,
+ "td": lambda n: timedelta_range("1 day", periods=n),
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
@@ -1027,11 +1008,8 @@ def shares_memory(left, right) -> bool:
"makeNumericIndex",
"makeObjectSeries",
"makePeriodIndex",
- "makeRangeIndex",
"makeTimeDataFrame",
- "makeTimedeltaIndex",
"makeTimeSeries",
- "makeUIntIndex",
"maybe_produces_warning",
"NARROW_NP_DTYPES",
"NP_NAT_OBJECTS",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3205b6657439f..a7e05d3ebddc5 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -63,9 +63,11 @@
Interval,
IntervalIndex,
Period,
+ RangeIndex,
Series,
Timedelta,
Timestamp,
+ timedelta_range,
)
import pandas._testing as tm
from pandas.core import ops
@@ -614,16 +616,16 @@ def _create_mi_with_dt64tz_level():
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
- "timedelta": tm.makeTimedeltaIndex(100),
- "range": tm.makeRangeIndex(100),
+ "timedelta": timedelta_range(start="1 day", periods=100, freq="D"),
+ "range": RangeIndex(100),
"int8": tm.makeIntIndex(100, dtype="int8"),
"int16": tm.makeIntIndex(100, dtype="int16"),
"int32": tm.makeIntIndex(100, dtype="int32"),
"int64": tm.makeIntIndex(100, dtype="int64"),
- "uint8": tm.makeUIntIndex(100, dtype="uint8"),
- "uint16": tm.makeUIntIndex(100, dtype="uint16"),
- "uint32": tm.makeUIntIndex(100, dtype="uint32"),
- "uint64": tm.makeUIntIndex(100, dtype="uint64"),
+ "uint8": Index(np.arange(100), dtype="uint8"),
+ "uint16": Index(np.arange(100), dtype="uint16"),
+ "uint32": Index(np.arange(100), dtype="uint32"),
+ "uint64": Index(np.arange(100), dtype="uint64"),
"float32": tm.makeFloatIndex(100, dtype="float32"),
"float64": tm.makeFloatIndex(100, dtype="float64"),
"bool-object": Index([True, False] * 5, dtype=object),
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 0d71fb0926df9..17d42506aaded 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -18,7 +18,10 @@
Categorical,
CategoricalDtype,
DataFrame,
+ DatetimeIndex,
Index,
+ PeriodIndex,
+ RangeIndex,
Series,
Timestamp,
date_range,
@@ -598,7 +601,7 @@ def test_sem(self, datetime_frame):
"C": [1.0],
"D": ["a"],
"E": Categorical(["a"], categories=["a"]),
- "F": pd.DatetimeIndex(["2000-01-02"], dtype="M8[ns]"),
+ "F": DatetimeIndex(["2000-01-02"], dtype="M8[ns]"),
"G": to_timedelta(["1 days"]),
},
),
@@ -610,7 +613,7 @@ def test_sem(self, datetime_frame):
"C": [np.nan],
"D": np.array([np.nan], dtype=object),
"E": Categorical([np.nan], categories=["a"]),
- "F": pd.DatetimeIndex([pd.NaT], dtype="M8[ns]"),
+ "F": DatetimeIndex([pd.NaT], dtype="M8[ns]"),
"G": to_timedelta([pd.NaT]),
},
),
@@ -621,7 +624,7 @@ def test_sem(self, datetime_frame):
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
- "L": pd.DatetimeIndex(
+ "L": DatetimeIndex(
["2000-01-02", "NaT", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["1 days", "nan", "nan", "nan"]),
@@ -635,7 +638,7 @@ def test_sem(self, datetime_frame):
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
- "L": pd.DatetimeIndex(
+ "L": DatetimeIndex(
["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["nan", "1 days", "nan", "nan"]),
@@ -652,15 +655,13 @@ def test_mode_dropna(self, dropna, expected):
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, "a", np.nan],
"E": Categorical([np.nan, np.nan, "a", np.nan]),
- "F": pd.DatetimeIndex(
- ["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
- ),
+ "F": DatetimeIndex(["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"),
"G": to_timedelta(["1 days", "nan", "nan", "nan"]),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(["a", np.nan, "a", np.nan]),
- "L": pd.DatetimeIndex(
+ "L": DatetimeIndex(
["2000-01-02", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
@@ -830,15 +831,15 @@ def test_sum_corner(self):
@pytest.mark.parametrize(
"index",
[
- tm.makeRangeIndex(0),
- tm.makeDateIndex(0),
- tm.makeNumericIndex(0, dtype=int),
- tm.makeNumericIndex(0, dtype=float),
- tm.makeDateIndex(0, freq="ME"),
- tm.makePeriodIndex(0),
+ RangeIndex(0),
+ DatetimeIndex([]),
+ Index([], dtype=np.int64),
+ Index([], dtype=np.float64),
+ DatetimeIndex([], freq="ME"),
+ PeriodIndex([], freq="D"),
],
)
- def test_axis_1_empty(self, all_reductions, index, using_array_manager):
+ def test_axis_1_empty(self, all_reductions, index):
df = DataFrame(columns=["a"], index=index)
result = getattr(df, all_reductions)(axis=1)
if all_reductions in ("any", "all"):
diff --git a/pandas/tests/indexes/datetimelike_/test_equals.py b/pandas/tests/indexes/datetimelike_/test_equals.py
index 7845d99614d34..fc9fbd33d0d28 100644
--- a/pandas/tests/indexes/datetimelike_/test_equals.py
+++ b/pandas/tests/indexes/datetimelike_/test_equals.py
@@ -18,6 +18,7 @@
TimedeltaIndex,
date_range,
period_range,
+ timedelta_range,
)
import pandas._testing as tm
@@ -141,7 +142,7 @@ def test_not_equals_bday(self, freq):
class TestTimedeltaIndexEquals(EqualsTests):
@pytest.fixture
def index(self):
- return tm.makeTimedeltaIndex(10)
+ return timedelta_range("1 day", periods=10)
def test_equals2(self):
# GH#13107
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 8e11fc28cc387..662f31cc3560e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -30,6 +30,7 @@
TimedeltaIndex,
date_range,
period_range,
+ timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexes.api import (
@@ -92,7 +93,7 @@ def test_constructor_copy(self, index):
name="Green Eggs & Ham",
), # DTI with tz
date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz
- pd.timedelta_range("1 days", freq="D", periods=3), # td
+ timedelta_range("1 days", freq="D", periods=3), # td
period_range("2015-01-01", freq="D", periods=3), # period
],
)
@@ -122,7 +123,7 @@ def test_constructor_from_index_dtlike(self, cast_as_obj, index):
date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
True,
), # datetimetz
- (pd.timedelta_range("1 days", freq="D", periods=3), False), # td
+ (timedelta_range("1 days", freq="D", periods=3), False), # td
(period_range("2015-01-01", freq="D", periods=3), False), # period
],
)
@@ -267,7 +268,7 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass):
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [Index, TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
- index = pd.timedelta_range("1 days", periods=5)
+ index = timedelta_range("1 days", periods=5)
index = index._with_freq(None) # won't be preserved by constructors
dtype = index.dtype
@@ -526,10 +527,14 @@ def test_map_with_tuples_mi(self):
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize(
- "attr", ["makeDateIndex", "makePeriodIndex", "makeTimedeltaIndex"]
+ "index",
+ [
+ date_range("2020-01-01", freq="D", periods=10),
+ period_range("2020-01-01", freq="D", periods=10),
+ timedelta_range("1 day", periods=10),
+ ],
)
- def test_map_tseries_indices_return_index(self, attr):
- index = getattr(tm, attr)(10)
+ def test_map_tseries_indices_return_index(self, index):
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index dab2475240267..a489c51a808fd 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -139,19 +139,16 @@ def test_union_different_types(index_flat, index_flat2, request):
@pytest.mark.parametrize(
- "idx_fact1,idx_fact2",
+ "idx1,idx2",
[
- (tm.makeIntIndex, tm.makeRangeIndex),
- (tm.makeFloatIndex, tm.makeIntIndex),
- (tm.makeFloatIndex, tm.makeRangeIndex),
- (tm.makeFloatIndex, tm.makeUIntIndex),
+ (Index(np.arange(5), dtype=np.int64), RangeIndex(5)),
+ (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.int64)),
+ (Index(np.arange(5), dtype=np.float64), RangeIndex(5)),
+ (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.uint64)),
],
)
-def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
+def test_compatible_inconsistent_pairs(idx1, idx2):
# GH 23525
- idx1 = idx_fact1(10)
- idx2 = idx_fact2(20)
-
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index cf9966145afce..1fe431e12f2a1 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -132,14 +132,16 @@ def test_scalar_with_mixed(self, indexer_sl):
expected = 3
assert result == expected
- @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex])
- def test_scalar_integer(self, index_func, frame_or_series, indexer_sl):
+ @pytest.mark.parametrize(
+ "index", [Index(np.arange(5), dtype=np.int64), RangeIndex(5)]
+ )
+ def test_scalar_integer(self, index, frame_or_series, indexer_sl):
getitem = indexer_sl is not tm.loc
# test how scalar float indexers work on int indexes
# integer index
- i = index_func(5)
+ i = index
obj = gen_obj(frame_or_series, i)
# coerce to equal int
@@ -169,11 +171,12 @@ def compare(x, y):
result = indexer_sl(s2)[3]
compare(result, expected)
- @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex])
- def test_scalar_integer_contains_float(self, index_func, frame_or_series):
+ @pytest.mark.parametrize(
+ "index", [Index(np.arange(5), dtype=np.int64), RangeIndex(5)]
+ )
+ def test_scalar_integer_contains_float(self, index, frame_or_series):
# contains
# integer index
- index = index_func(5)
obj = gen_obj(frame_or_series, index)
# coerce to equal int
@@ -348,11 +351,11 @@ def test_integer_positional_indexing(self, idx):
with pytest.raises(TypeError, match=msg):
s.iloc[idx]
- @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex])
- def test_slice_integer_frame_getitem(self, index_func):
+ @pytest.mark.parametrize(
+ "index", [Index(np.arange(5), dtype=np.int64), RangeIndex(5)]
+ )
+ def test_slice_integer_frame_getitem(self, index):
# similar to above, but on the getitem dim (of a DataFrame)
- index = index_func(5)
-
s = DataFrame(np.random.default_rng(2).standard_normal((5, 2)), index=index)
# getitem
@@ -403,11 +406,11 @@ def test_slice_integer_frame_getitem(self, index_func):
s[idx]
@pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)])
- @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex])
- def test_float_slice_getitem_with_integer_index_raises(self, idx, index_func):
+ @pytest.mark.parametrize(
+ "index", [Index(np.arange(5), dtype=np.int64), RangeIndex(5)]
+ )
+ def test_float_slice_getitem_with_integer_index_raises(self, idx, index):
# similar to above, but on the getitem dim (of a DataFrame)
- index = index_func(5)
-
s = DataFrame(np.random.default_rng(2).standard_normal((5, 2)), index=index)
# setitem
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 5b16ea9ee6b09..7e8365a8f9ffa 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -17,6 +17,7 @@
Timestamp,
concat,
date_range,
+ period_range,
timedelta_range,
)
import pandas._testing as tm
@@ -953,25 +954,23 @@ def test_columns_multiindex_modified(tmp_path, setup_path):
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
-def test_to_hdf_with_object_column_names(tmp_path, setup_path):
+@pytest.mark.parametrize(
+ "columns",
+ [
+ Index([0, 1], dtype=np.int64),
+ Index([0.0, 1.0], dtype=np.float64),
+ date_range("2020-01-01", periods=2),
+ timedelta_range("1 day", periods=2),
+ period_range("2020-01-01", periods=2, freq="D"),
+ ],
+)
+def test_to_hdf_with_object_column_names_should_fail(tmp_path, setup_path, columns):
# GH9057
-
- types_should_fail = [
- tm.makeIntIndex,
- tm.makeFloatIndex,
- tm.makeDateIndex,
- tm.makeTimedeltaIndex,
- tm.makePeriodIndex,
- ]
-
- for index in types_should_fail:
- df = DataFrame(
- np.random.default_rng(2).standard_normal((10, 2)), columns=index(2)
- )
- path = tmp_path / setup_path
- msg = "cannot have non-object label DataIndexableCol"
- with pytest.raises(ValueError, match=msg):
- df.to_hdf(path, key="df", format="table", data_columns=True)
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), columns=columns)
+ path = tmp_path / setup_path
+ msg = "cannot have non-object label DataIndexableCol"
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", format="table", data_columns=True)
@pytest.mark.parametrize("dtype", [None, "category"])
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 9d69321ff7dbb..3349b886bbef6 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -10,6 +10,7 @@
Index,
Series,
date_range,
+ timedelta_range,
)
import pandas._testing as tm
@@ -73,9 +74,9 @@ def test_tab_completion_with_categorical(self):
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
- tm.makeTimedeltaIndex(10),
+ timedelta_range("1 day", periods=10),
tm.makeIntIndex(10),
- tm.makeUIntIndex(10),
+ Index(np.arange(10), dtype=np.uint64),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
@@ -178,7 +179,7 @@ def test_inspect_getmembers(self):
def test_unknown_attribute(self):
# GH#9680
- tdi = pd.timedelta_range(start=0, periods=10, freq="1s")
+ tdi = timedelta_range(start=0, periods=10, freq="1s")
ser = Series(np.random.default_rng(2).normal(size=10), index=tdi)
assert "foo" not in ser.__dict__
msg = "'Series' object has no attribute 'foo'"
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index 5d22896d9d055..45741e852fef7 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -17,12 +17,12 @@
from pandas import (
DatetimeIndex,
Index,
+ RangeIndex,
Series,
Timestamp,
date_range,
period_range,
)
-import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
@@ -374,10 +374,10 @@ def test_non_datetime_index2():
@pytest.mark.parametrize(
"idx",
[
- tm.makeIntIndex(10),
- tm.makeFloatIndex(10),
- tm.makePeriodIndex(10),
- tm.makeRangeIndex(10),
+ Index(np.arange(5), dtype=np.int64),
+ Index(np.arange(5), dtype=np.float64),
+ period_range("2020-01-01", periods=5),
+ RangeIndex(5),
],
)
def test_invalid_index_types(idx):
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 5f1d905aa4a46..0417c7a631da2 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -7,6 +7,8 @@
Index,
MultiIndex,
Series,
+ period_range,
+ timedelta_range,
)
import pandas._testing as tm
from pandas.core.util.hashing import hash_tuples
@@ -25,7 +27,7 @@
Series([True, False, True] * 3),
Series(pd.date_range("20130101", periods=9)),
Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
- Series(pd.timedelta_range("2000", periods=9)),
+ Series(timedelta_range("2000", periods=9)),
]
)
def series(request):
@@ -194,8 +196,8 @@ def test_hash_pandas_object_diff_index_non_empty(obj):
[
Index([1, 2, 3]),
Index([True, False, True]),
- tm.makeTimedeltaIndex(),
- tm.makePeriodIndex(),
+ timedelta_range("1 day", periods=2),
+ period_range("2020-01-01", freq="D", periods=2),
MultiIndex.from_product(
[range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]
),
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56200 | 2023-11-27T03:03:05Z | 2023-11-28T15:54:17Z | 2023-11-28T15:54:17Z | 2023-11-28T15:54:21Z |
Backport PR #56194 on branch 2.1.x (BUG: hdf can't deal with ea dtype columns) | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index eb28e42d303a1..684b68baa123c 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -24,6 +24,7 @@ Bug fixes
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55753`)
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
+- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d3055f0ad2a38..2a72f7d32b1e7 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -56,7 +56,6 @@
is_bool_dtype,
is_complex_dtype,
is_list_like,
- is_object_dtype,
is_string_dtype,
needs_i8_conversion,
)
@@ -2645,7 +2644,7 @@ class DataIndexableCol(DataCol):
is_data_indexable = True
def validate_names(self) -> None:
- if not is_object_dtype(Index(self.values).dtype):
+ if not is_string_dtype(Index(self.values).dtype):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 085db5f521a9f..3bc8c0c4f8e30 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -526,3 +526,18 @@ def test_round_trip_equals(tmp_path, setup_path):
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
+
+
+def test_infer_string_columns(tmp_path, setup_path):
+ # GH#
+ pytest.importorskip("pyarrow")
+ path = tmp_path / setup_path
+ with pd.option_context("future.infer_string", True):
+ df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
+ ["A", "B"]
+ )
+ expected = df.copy()
+ df.to_hdf(path, key="df", format="table")
+
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
| Backport PR #56194: BUG: hdf can't deal with ea dtype columns | https://api.github.com/repos/pandas-dev/pandas/pulls/56199 | 2023-11-27T02:43:13Z | 2023-11-27T11:09:55Z | 2023-11-27T11:09:55Z | 2023-11-27T11:09:56Z |
Adjust tests in xml folder for new string option | diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 88655483800ee..e4456b0a78e06 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -32,6 +32,7 @@
ArrowStringArray,
StringArray,
)
+from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics
from pandas.io.common import get_handle
from pandas.io.xml import read_xml
@@ -2004,7 +2005,9 @@ def test_s3_parser_consistency(s3_public_bucket_with_data, s3so):
tm.assert_frame_equal(df_lxml, df_etree)
-def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
+def test_read_xml_nullable_dtypes(
+ parser, string_storage, dtype_backend, using_infer_string
+):
# GH#50500
data = """<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
@@ -2032,7 +2035,12 @@ def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
</row>
</data>"""
- if string_storage == "python":
+ if using_infer_string:
+ pa = pytest.importorskip("pyarrow")
+ string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"]))
+ string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None]))
+
+ elif string_storage == "python":
string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56198 | 2023-11-26T23:40:07Z | 2023-11-27T02:31:24Z | 2023-11-27T02:31:24Z | 2023-11-27T11:10:22Z |
Adjust tests in json folder for new string option | diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 79dbe448e9cbe..7569a74752bf2 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -56,7 +56,7 @@ def df_table():
class TestBuildSchema:
- def test_build_table_schema(self, df_schema):
+ def test_build_table_schema(self, df_schema, using_infer_string):
result = build_table_schema(df_schema, version=False)
expected = {
"fields": [
@@ -68,6 +68,8 @@ def test_build_table_schema(self, df_schema):
],
"primaryKey": ["idx"],
}
+ if using_infer_string:
+ expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"}
assert result == expected
result = build_table_schema(df_schema)
assert "pandas_version" in result
@@ -97,7 +99,7 @@ def test_series_unnamed(self):
}
assert result == expected
- def test_multiindex(self, df_schema):
+ def test_multiindex(self, df_schema, using_infer_string):
df = df_schema
idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])
df.index = idx
@@ -114,6 +116,13 @@ def test_multiindex(self, df_schema):
],
"primaryKey": ["level_0", "level_1"],
}
+ if using_infer_string:
+ expected["fields"][0] = {
+ "name": "level_0",
+ "type": "any",
+ "extDtype": "string",
+ }
+ expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"}
assert result == expected
df.index.names = ["idx0", None]
@@ -156,7 +165,10 @@ def test_as_json_table_type_bool_data(self, bool_type):
def test_as_json_table_type_date_data(self, date_data):
assert as_json_table_type(date_data.dtype) == "datetime"
- @pytest.mark.parametrize("str_data", [pd.Series(["a", "b"]), pd.Index(["a", "b"])])
+ @pytest.mark.parametrize(
+ "str_data",
+ [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)],
+ )
def test_as_json_table_type_string_data(self, str_data):
assert as_json_table_type(str_data.dtype) == "string"
@@ -261,7 +273,7 @@ def test_read_json_from_to_json_results(self):
tm.assert_frame_equal(result1, df)
tm.assert_frame_equal(result2, df)
- def test_to_json(self, df_table):
+ def test_to_json(self, df_table, using_infer_string):
df = df_table
df.index.name = "idx"
result = df.to_json(orient="table", date_format="iso")
@@ -292,6 +304,9 @@ def test_to_json(self, df_table):
{"name": "H", "type": "datetime", "tz": "US/Central"},
]
+ if using_infer_string:
+ fields[2] = {"name": "B", "type": "any", "extDtype": "string"}
+
schema = {"fields": fields, "primaryKey": ["idx"]}
data = [
OrderedDict(
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 5275050391ca3..052356fdc96ed 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -13,6 +13,8 @@
import numpy as np
import pytest
+from pandas._config import using_pyarrow_string_dtype
+
from pandas.compat import IS64
import pandas.util._test_decorators as td
@@ -30,6 +32,7 @@
ArrowStringArray,
StringArray,
)
+from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics
from pandas.io.json import ujson_dumps
@@ -238,7 +241,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, dtype):
@pytest.mark.parametrize("convert_axes", [True, False])
def test_roundtrip_categorical(
- self, request, orient, categorical_frame, convert_axes
+ self, request, orient, categorical_frame, convert_axes, using_infer_string
):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
@@ -252,7 +255,9 @@ def test_roundtrip_categorical(
result = read_json(data, orient=orient, convert_axes=convert_axes)
expected = categorical_frame.copy()
- expected.index = expected.index.astype(str) # Categorical not preserved
+ expected.index = expected.index.astype(
+ str if not using_infer_string else "string[pyarrow_numpy]"
+ ) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
assert_json_roundtrip_equal(result, expected, orient)
@@ -518,9 +523,9 @@ def test_v12_compat(self, datapath):
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = read_json(v12_iso_json)
- tm.assert_frame_equal(df_iso, df_unser_iso)
+ tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False)
- def test_blocks_compat_GH9037(self):
+ def test_blocks_compat_GH9037(self, using_infer_string):
index = pd.date_range("20000101", periods=10, freq="h")
# freq doesn't round-trip
index = DatetimeIndex(list(index), freq=None)
@@ -604,7 +609,9 @@ def test_blocks_compat_GH9037(self):
)
# JSON deserialisation always creates unicode strings
- df_mixed.columns = df_mixed.columns.astype(np.str_)
+ df_mixed.columns = df_mixed.columns.astype(
+ np.str_ if not using_infer_string else "string[pyarrow_numpy]"
+ )
data = StringIO(df_mixed.to_json(orient="split"))
df_roundtrip = read_json(data, orient="split")
tm.assert_frame_equal(
@@ -676,16 +683,19 @@ def test_series_non_unique_index(self):
unserialized = read_json(
StringIO(s.to_json(orient="records")), orient="records", typ="series"
)
- tm.assert_numpy_array_equal(s.values, unserialized.values)
+ tm.assert_equal(s.values, unserialized.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
- def test_series_roundtrip_simple(self, orient, string_series):
+ def test_series_roundtrip_simple(self, orient, string_series, using_infer_string):
data = StringIO(string_series.to_json(orient=orient))
result = read_json(data, typ="series", orient=orient)
expected = string_series
+ if using_infer_string and orient in ("split", "index", "columns"):
+ # These schemas don't contain dtypes, so we infer string
+ expected.index = expected.index.astype("string[pyarrow_numpy]")
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
@@ -1459,6 +1469,9 @@ def test_from_json_to_json_table_dtypes(self):
result = read_json(StringIO(dfjson), orient="table")
tm.assert_frame_equal(result, expected)
+ # TODO: We are casting to string which coerces None to NaN before casting back
+ # to object, ending up with incorrect na values
+ @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion")
@pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
def test_to_json_from_json_columns_dtypes(self, orient):
# GH21892 GH33205
@@ -1716,6 +1729,11 @@ def test_to_json_indent(self, indent):
assert result == expected
+ @pytest.mark.skipif(
+ using_pyarrow_string_dtype(),
+ reason="Adjust expected when infer_string is default, no bug here, "
+ "just a complicated parametrization",
+ )
@pytest.mark.parametrize(
"orient,expected",
[
@@ -1991,7 +2009,9 @@ def test_json_uint64(self):
@pytest.mark.parametrize(
"orient", ["split", "records", "values", "index", "columns"]
)
- def test_read_json_dtype_backend(self, string_storage, dtype_backend, orient):
+ def test_read_json_dtype_backend(
+ self, string_storage, dtype_backend, orient, using_infer_string
+ ):
# GH#50750
pa = pytest.importorskip("pyarrow")
df = DataFrame(
@@ -2007,7 +2027,10 @@ def test_read_json_dtype_backend(self, string_storage, dtype_backend, orient):
}
)
- if string_storage == "python":
+ if using_infer_string:
+ string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None]))
+ elif string_storage == "python":
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
| sits on #56195 | https://api.github.com/repos/pandas-dev/pandas/pulls/56197 | 2023-11-26T23:37:34Z | 2023-11-30T17:41:24Z | 2023-11-30T17:41:24Z | 2023-11-30T17:42:00Z |
DOC: reoder whatsnew enhancements | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index d252c19a95d4a..6bd20ace44b65 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -14,81 +14,6 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
-.. _whatsnew_220.enhancements.calamine:
-
-Calamine engine for :func:`read_excel`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``calamine`` engine was added to :func:`read_excel`.
-It uses ``python-calamine``, which provides Python bindings for the Rust library `calamine <https://crates.io/crates/calamine>`__.
-This engine supports Excel files (``.xlsx``, ``.xlsm``, ``.xls``, ``.xlsb``) and OpenDocument spreadsheets (``.ods``) (:issue:`50395`).
-
-There are two advantages of this engine:
-
-1. Calamine is often faster than other engines, some benchmarks show results up to 5x faster than 'openpyxl', 20x - 'odf', 4x - 'pyxlsb', and 1.5x - 'xlrd'.
- But, 'openpyxl' and 'pyxlsb' are faster in reading a few rows from large files because of lazy iteration over rows.
-2. Calamine supports the recognition of datetime in ``.xlsb`` files, unlike 'pyxlsb' which is the only other engine in pandas that can read ``.xlsb`` files.
-
-.. code-block:: python
-
- pd.read_excel("path_to_file.xlsb", engine="calamine")
-
-
-For more, see :ref:`io.calamine` in the user guide on IO tools.
-
-.. _whatsnew_220.enhancements.struct_accessor:
-
-Series.struct accessor to with PyArrow structured data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``Series.struct`` accessor provides attributes and methods for processing
-data with ``struct[pyarrow]`` dtype Series. For example,
-:meth:`Series.struct.explode` converts PyArrow structured data to a pandas
-DataFrame. (:issue:`54938`)
-
-.. ipython:: python
-
- import pyarrow as pa
- series = pd.Series(
- [
- {"project": "pandas", "version": "2.2.0"},
- {"project": "numpy", "version": "1.25.2"},
- {"project": "pyarrow", "version": "13.0.0"},
- ],
- dtype=pd.ArrowDtype(
- pa.struct([
- ("project", pa.string()),
- ("version", pa.string()),
- ])
- ),
- )
- series.struct.explode()
-
-.. _whatsnew_220.enhancements.list_accessor:
-
-Series.list accessor for PyArrow list data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``Series.list`` accessor provides attributes and methods for processing
-data with ``list[pyarrow]`` dtype Series. For example,
-:meth:`Series.list.__getitem__` allows indexing pyarrow lists in
-a Series. (:issue:`55323`)
-
-.. ipython:: python
-
- import pyarrow as pa
- series = pd.Series(
- [
- [1, 2, 3],
- [4, 5],
- [6],
- ],
- dtype=pd.ArrowDtype(
- pa.list_(pa.int64())
- ),
- )
- series.list[0]
-
.. _whatsnew_220.enhancements.adbc_support:
ADBC Driver support in to_sql and read_sql
@@ -180,6 +105,81 @@ For a full list of ADBC drivers and their development status, see the `ADBC Driv
Implementation Status <https://arrow.apache.org/adbc/current/driver/status.html>`_
documentation.
+.. _whatsnew_220.enhancements.struct_accessor:
+
+Series.struct accessor to with PyArrow structured data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``Series.struct`` accessor provides attributes and methods for processing
+data with ``struct[pyarrow]`` dtype Series. For example,
+:meth:`Series.struct.explode` converts PyArrow structured data to a pandas
+DataFrame. (:issue:`54938`)
+
+.. ipython:: python
+
+ import pyarrow as pa
+ series = pd.Series(
+ [
+ {"project": "pandas", "version": "2.2.0"},
+ {"project": "numpy", "version": "1.25.2"},
+ {"project": "pyarrow", "version": "13.0.0"},
+ ],
+ dtype=pd.ArrowDtype(
+ pa.struct([
+ ("project", pa.string()),
+ ("version", pa.string()),
+ ])
+ ),
+ )
+ series.struct.explode()
+
+.. _whatsnew_220.enhancements.list_accessor:
+
+Series.list accessor for PyArrow list data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``Series.list`` accessor provides attributes and methods for processing
+data with ``list[pyarrow]`` dtype Series. For example,
+:meth:`Series.list.__getitem__` allows indexing pyarrow lists in
+a Series. (:issue:`55323`)
+
+.. ipython:: python
+
+ import pyarrow as pa
+ series = pd.Series(
+ [
+ [1, 2, 3],
+ [4, 5],
+ [6],
+ ],
+ dtype=pd.ArrowDtype(
+ pa.list_(pa.int64())
+ ),
+ )
+ series.list[0]
+
+.. _whatsnew_220.enhancements.calamine:
+
+Calamine engine for :func:`read_excel`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``calamine`` engine was added to :func:`read_excel`.
+It uses ``python-calamine``, which provides Python bindings for the Rust library `calamine <https://crates.io/crates/calamine>`__.
+This engine supports Excel files (``.xlsx``, ``.xlsm``, ``.xls``, ``.xlsb``) and OpenDocument spreadsheets (``.ods``) (:issue:`50395`).
+
+There are two advantages of this engine:
+
+1. Calamine is often faster than other engines, some benchmarks show results up to 5x faster than 'openpyxl', 20x - 'odf', 4x - 'pyxlsb', and 1.5x - 'xlrd'.
+ But, 'openpyxl' and 'pyxlsb' are faster in reading a few rows from large files because of lazy iteration over rows.
+2. Calamine supports the recognition of datetime in ``.xlsb`` files, unlike 'pyxlsb' which is the only other engine in pandas that can read ``.xlsb`` files.
+
+.. code-block:: python
+
+ pd.read_excel("path_to_file.xlsb", engine="calamine")
+
+
+For more, see :ref:`io.calamine` in the user guide on IO tools.
+
.. _whatsnew_220.enhancements.other:
Other enhancements
| Reodering this for potential impact for users, e.g.
- ADBC driver should go first
- read_excel improvements are probably relatively low-key for most users, so I put it at the end.
These blocks are relatively long, so we should make sure that the more impactful items go first | https://api.github.com/repos/pandas-dev/pandas/pulls/56196 | 2023-11-26T23:13:34Z | 2023-11-27T02:32:58Z | 2023-11-27T02:32:58Z | 2023-11-27T11:10:15Z |
BUG: read_json not handling string dtype when converting to dates | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index d252c19a95d4a..5cb99afdcb98d 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -530,6 +530,7 @@ I/O
- Bug in :func:`read_csv` where ``on_bad_lines="warn"`` would write to ``stderr`` instead of raise a Python warning. This now yields a :class:`.errors.ParserWarning` (:issue:`54296`)
- Bug in :func:`read_csv` with ``engine="pyarrow"`` where ``usecols`` wasn't working with a csv with no headers (:issue:`54459`)
- Bug in :func:`read_excel`, with ``engine="xlrd"`` (``xls`` files) erroring when file contains NaNs/Infs (:issue:`54564`)
+- Bug in :func:`read_json` not handling dtype conversion properly if ``infer_string`` is set (:issue:`56195`)
- Bug in :func:`to_excel`, with ``OdsWriter`` (``ods`` files) writing boolean/string value (:issue:`54994`)
- Bug in :meth:`DataFrame.to_hdf` and :func:`read_hdf` with ``datetime64`` dtypes with non-nanosecond resolution failing to round-trip correctly (:issue:`55622`)
- Bug in :meth:`pandas.read_excel` with ``engine="odf"`` (``ods`` files) when string contains annotation (:issue:`55200`)
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index e17fcea0aae71..9c56089560507 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -32,7 +32,10 @@
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend
-from pandas.core.dtypes.common import ensure_str
+from pandas.core.dtypes.common import (
+ ensure_str,
+ is_string_dtype,
+)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCIndex
@@ -1249,7 +1252,7 @@ def _try_convert_data(
if self.dtype_backend is not lib.no_default and not isinstance(data, ABCIndex):
# Fall through for conversion later on
return data, True
- elif data.dtype == "object":
+ elif is_string_dtype(data.dtype):
# try float
try:
data = data.astype("float64")
@@ -1301,6 +1304,10 @@ def _try_convert_to_date(self, data):
return data, False
new_data = data
+
+ if new_data.dtype == "string":
+ new_data = new_data.astype(object)
+
if new_data.dtype == "object":
try:
new_data = data.astype("int64")
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index 410c20bb22d1e..ff7d34c85c015 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -93,27 +93,31 @@ def test_read_unsupported_compression_type():
pd.read_json(path, compression="unsupported")
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_to_json_compression(
- compression_only, read_infer, to_infer, compression_to_extension
+ compression_only, read_infer, to_infer, compression_to_extension, infer_string
):
- # see gh-15008
- compression = compression_only
+ with pd.option_context("future.infer_string", infer_string):
+ # see gh-15008
+ compression = compression_only
- # We'll complete file extension subsequently.
- filename = "test."
- filename += compression_to_extension[compression]
+ # We'll complete file extension subsequently.
+ filename = "test."
+ filename += compression_to_extension[compression]
- df = pd.DataFrame({"A": [1]})
+ df = pd.DataFrame({"A": [1]})
- to_compression = "infer" if to_infer else compression
- read_compression = "infer" if read_infer else compression
+ to_compression = "infer" if to_infer else compression
+ read_compression = "infer" if read_infer else compression
- with tm.ensure_clean(filename) as path:
- df.to_json(path, compression=to_compression)
- result = pd.read_json(path, compression=read_compression)
- tm.assert_frame_equal(result, df)
+ with tm.ensure_clean(filename) as path:
+ df.to_json(path, compression=to_compression)
+ result = pd.read_json(path, compression=read_compression)
+ tm.assert_frame_equal(result, df)
def test_to_json_compression_mode(compression):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56195 | 2023-11-26T23:09:57Z | 2023-11-27T17:30:36Z | 2023-11-27T17:30:36Z | 2023-11-27T17:33:28Z |
BUG: hdf can't deal with ea dtype columns | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 77ce303dc1bfe..0cbf211305d12 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -24,6 +24,7 @@ Bug fixes
- Bug in :class:`Series` constructor raising DeprecationWarning when ``index`` is a list of :class:`Series` (:issue:`55228`)
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
+- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 9e0e3686e4aa2..50611197ad7dd 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -57,7 +57,6 @@
is_bool_dtype,
is_complex_dtype,
is_list_like,
- is_object_dtype,
is_string_dtype,
needs_i8_conversion,
)
@@ -2647,7 +2646,7 @@ class DataIndexableCol(DataCol):
is_data_indexable = True
def validate_names(self) -> None:
- if not is_object_dtype(Index(self.values).dtype):
+ if not is_string_dtype(Index(self.values).dtype):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 6c24843f18d0d..baac90e52e962 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -531,3 +531,18 @@ def test_round_trip_equals(tmp_path, setup_path):
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
+
+
+def test_infer_string_columns(tmp_path, setup_path):
+ # GH#
+ pytest.importorskip("pyarrow")
+ path = tmp_path / setup_path
+ with pd.option_context("future.infer_string", True):
+ df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
+ ["A", "B"]
+ )
+ expected = df.copy()
+ df.to_hdf(path, key="df", format="table")
+
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #55088 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56194 | 2023-11-26T22:23:53Z | 2023-11-27T02:43:05Z | 2023-11-27T02:43:05Z | 2023-11-27T11:09:32Z |
Adjust test in excel folder for new string option | diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index abbdb77efad0e..caa2da1b6123b 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -14,6 +14,8 @@
import numpy as np
import pytest
+from pandas._config import using_pyarrow_string_dtype
+
import pandas.util._test_decorators as td
import pandas as pd
@@ -637,6 +639,9 @@ def test_dtype_backend_and_dtype(self, read_ext):
)
tm.assert_frame_equal(result, df)
+ @pytest.mark.xfail(
+ using_pyarrow_string_dtype(), reason="infer_string takes precedence"
+ )
def test_dtype_backend_string(self, read_ext, string_storage):
# GH#36712
if read_ext in (".xlsb", ".xls"):
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 22cd0621fd4c4..507d7ed4bf9d0 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -709,7 +709,7 @@ def test_excel_date_datetime_format(self, ext, path):
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
- def test_to_excel_interval_no_labels(self, path):
+ def test_to_excel_interval_no_labels(self, path, using_infer_string):
# see gh-19242
#
# Test writing Interval without labels.
@@ -719,7 +719,9 @@ def test_to_excel_interval_no_labels(self, path):
expected = df.copy()
df["new"] = pd.cut(df[0], 10)
- expected["new"] = pd.cut(expected[0], 10).astype(str)
+ expected["new"] = pd.cut(expected[0], 10).astype(
+ str if not using_infer_string else "string[pyarrow_numpy]"
+ )
df.to_excel(path, sheet_name="test1")
with ExcelFile(path) as reader:
@@ -1213,10 +1215,9 @@ def test_render_as_column_name(self, path):
def test_true_and_false_value_options(self, path):
# see gh-13347
- df = DataFrame([["foo", "bar"]], columns=["col1", "col2"])
- msg = "Downcasting behavior in `replace`"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df.replace({"foo": True, "bar": False})
+ df = DataFrame([["foo", "bar"]], columns=["col1", "col2"], dtype=object)
+ with option_context("future.no_silent_downcasting", True):
+ expected = df.replace({"foo": True, "bar": False}).astype("bool")
df.to_excel(path)
read_frame = pd.read_excel(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56193 | 2023-11-26T22:12:04Z | 2023-11-27T02:44:42Z | 2023-11-27T02:44:42Z | 2023-11-27T11:09:23Z |
Adjust tests in extension folder for new string option | diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 5ba65ceaeeada..c7b768f6e3c88 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -59,7 +59,12 @@ def test_check_dtype(self, data):
# check equivalency for using .dtypes
df = pd.DataFrame(
- {"A": pd.Series(data, dtype=dtype), "B": data, "C": "foo", "D": 1}
+ {
+ "A": pd.Series(data, dtype=dtype),
+ "B": data,
+ "C": pd.Series(["foo"] * len(data), dtype=object),
+ "D": 1,
+ }
)
result = df.dtypes == str(dtype)
assert np.dtype("int64") != "Int64"
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 5c21c4f7137a5..4e8221f67a74d 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -21,7 +21,12 @@ class BaseGroupbyTests:
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame(
- {"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping}
+ {
+ "A": pd.Series(
+ ["B", "B", None, None, "A", "A", "B", "C"], dtype=object
+ ),
+ "B": data_for_grouping,
+ }
)
gr1 = df.groupby("A").grouper.groupings[0]
gr2 = df.groupby("B").grouper.groupings[0]
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index 40cc952d44200..ffb7a24b4b390 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -44,7 +44,7 @@ def test_dropna_series(self, data_missing):
tm.assert_series_equal(result, expected)
def test_dropna_frame(self, data_missing):
- df = pd.DataFrame({"A": data_missing})
+ df = pd.DataFrame({"A": data_missing}, columns=pd.Index(["A"], dtype=object))
# defaults
result = df.dropna()
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 40fab5ec11d7d..5cd66d8a874c7 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas._config import using_pyarrow_string_dtype
+
from pandas.core.dtypes.common import is_string_dtype
import pandas as pd
@@ -27,13 +29,23 @@ def _get_expected_exception(
# The self.obj_bar_exc pattern isn't great in part because it can depend
# on op_name or dtypes, but we use it here for backward-compatibility.
if op_name in ["__divmod__", "__rdivmod__"]:
- return self.divmod_exc
- if isinstance(obj, pd.Series) and isinstance(other, pd.Series):
- return self.series_array_exc
+ result = self.divmod_exc
+ elif isinstance(obj, pd.Series) and isinstance(other, pd.Series):
+ result = self.series_array_exc
elif isinstance(obj, pd.Series):
- return self.series_scalar_exc
+ result = self.series_scalar_exc
else:
- return self.frame_scalar_exc
+ result = self.frame_scalar_exc
+
+ if using_pyarrow_string_dtype() and result is not None:
+ import pyarrow as pa
+
+ result = ( # type: ignore[assignment]
+ result,
+ pa.lib.ArrowNotImplementedError,
+ NotImplementedError,
+ )
+ return result
def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
# In _check_op we check that the result of a pointwise operation
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 067b401ce2f23..187da89729f0e 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -351,11 +351,11 @@ def test_setitem_preserves_views(self, data):
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
- df = expected = pd.DataFrame({"data": pd.Series(data)})
+ df = expected = pd.DataFrame({0: pd.Series(data)})
result = pd.DataFrame(index=df.index)
key = full_indexer(df)
- result.loc[key, "data"] = df["data"]
+ result.loc[key, 0] = df[0]
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 5cde5df4bc007..6f33b18b19c51 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -18,6 +18,8 @@
import numpy as np
import pytest
+from pandas._config import using_pyarrow_string_dtype
+
import pandas as pd
from pandas import Categorical
import pandas._testing as tm
@@ -100,7 +102,9 @@ def test_contains(self, data, data_missing):
if na_value_obj is na_value:
continue
assert na_value_obj not in data
- assert na_value_obj in data_missing # this line differs from super method
+ # this section suffers from super method
+ if not using_pyarrow_string_dtype():
+ assert na_value_obj in data_missing
def test_empty(self, dtype):
cls = dtype.construct_array_type()
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index f1939ea174841..c0692064cfaec 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -196,7 +196,7 @@ def test_series_constructor_scalar_with_index(self, data, dtype):
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
- def test_check_dtype(self, data, request):
+ def test_check_dtype(self, data, request, using_infer_string):
if data.dtype.numpy_dtype == "object":
request.applymarker(
pytest.mark.xfail(
@@ -429,7 +429,7 @@ def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
if data.dtype.numpy_dtype != object:
if not isinstance(key, slice) or key != slice(None):
expected = pd.DataFrame({"data": data.to_numpy()})
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected, check_column_type=False)
@skip_nested
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56191 | 2023-11-26T21:11:49Z | 2023-12-08T23:30:40Z | 2023-12-08T23:30:40Z | 2023-12-17T20:17:22Z |
Adjust tests for apply folder for new string option | diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 2d7549e09a986..b7eac6b8f0ea1 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1464,13 +1464,16 @@ def test_apply_datetime_tz_issue(engine, request):
@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})])
@pytest.mark.parametrize("method", ["min", "max", "sum"])
-def test_mixed_column_raises(df, method):
+def test_mixed_column_raises(df, method, using_infer_string):
# GH 16832
if method == "sum":
- msg = r'can only concatenate str \(not "int"\) to str'
+ msg = r'can only concatenate str \(not "int"\) to str|does not support'
else:
msg = "not supported between instances of 'str' and 'float'"
- with pytest.raises(TypeError, match=msg):
+ if not using_infer_string:
+ with pytest.raises(TypeError, match=msg):
+ getattr(df, method)()
+ else:
getattr(df, method)()
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index ae5b3eef4e4fe..48dde6d42f743 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -224,9 +224,14 @@ def transform2(row):
DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]
),
)
-def test_agg_cython_table_raises_frame(df, func, expected, axis):
+def test_agg_cython_table_raises_frame(df, func, expected, axis, using_infer_string):
# GH 21224
- msg = "can't multiply sequence by non-int of type 'str'"
+ if using_infer_string:
+ import pyarrow as pa
+
+ expected = (expected, pa.lib.ArrowNotImplementedError)
+
+ msg = "can't multiply sequence by non-int of type 'str'|has no kernel"
warn = None if isinstance(func, str) else FutureWarning
with pytest.raises(expected, match=msg):
with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"):
@@ -249,11 +254,18 @@ def test_agg_cython_table_raises_frame(df, func, expected, axis):
)
),
)
-def test_agg_cython_table_raises_series(series, func, expected):
+def test_agg_cython_table_raises_series(series, func, expected, using_infer_string):
# GH21224
msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
if func == "median" or func is np.nanmedian or func is np.median:
msg = r"Cannot convert \['a' 'b' 'c'\] to numeric"
+
+ if using_infer_string:
+ import pyarrow as pa
+
+ expected = (expected, pa.lib.ArrowNotImplementedError)
+
+ msg = msg + "|does not support|has no kernel"
warn = None if isinstance(func, str) else FutureWarning
with pytest.raises(expected, match=msg):
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 177dff2d771d4..24e48ebd4ed54 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -222,7 +222,7 @@ def f(x):
assert result == "Asia/Tokyo"
-def test_apply_categorical(by_row):
+def test_apply_categorical(by_row, using_infer_string):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = Series(values, name="XX", index=list("abcdefg"))
@@ -245,7 +245,7 @@ def test_apply_categorical(by_row):
result = ser.apply(lambda x: "A")
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
- assert result.dtype == object
+ assert result.dtype == object if not using_infer_string else "string[pyarrow_numpy]"
@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]])
| sits on #56189 | https://api.github.com/repos/pandas-dev/pandas/pulls/56190 | 2023-11-26T20:32:34Z | 2023-11-30T18:28:24Z | 2023-11-30T18:28:24Z | 2023-11-30T18:29:10Z |
BUG: numba raises for string columns or index | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index d252c19a95d4a..d4954e6caf2d0 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -496,8 +496,8 @@ Conversion
Strings
^^^^^^^
- Bug in :func:`pandas.api.types.is_string_dtype` while checking object array with no elements is of the string dtype (:issue:`54661`)
+- Bug in :meth:`DataFrame.apply` failing when ``engine="numba"`` and columns or index have ``StringDtype`` (:issue:`56189`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for ``string[pyarrow]`` (:issue:`54942`)
--
Interval
^^^^^^^^
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 3b79882d3c762..bb3cc3a03760f 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1172,11 +1172,17 @@ def apply_with_numba(self) -> dict[int, Any]:
)
from pandas.core._numba.extensions import set_numba_data
+ index = self.obj.index
+ if index.dtype == "string":
+ index = index.astype(object)
+
+ columns = self.obj.columns
+ if columns.dtype == "string":
+ columns = columns.astype(object)
+
# Convert from numba dict to regular dict
# Our isinstance checks in the df constructor don't pass for numbas typed dict
- with set_numba_data(self.obj.index) as index, set_numba_data(
- self.columns
- ) as columns:
+ with set_numba_data(index) as index, set_numba_data(columns) as columns:
res = dict(nb_func(self.values, columns, index))
return res
diff --git a/pandas/tests/apply/test_numba.py b/pandas/tests/apply/test_numba.py
index ee239568d057d..85d7baee1bdf5 100644
--- a/pandas/tests/apply/test_numba.py
+++ b/pandas/tests/apply/test_numba.py
@@ -24,6 +24,22 @@ def test_numba_vs_python_noop(float_frame, apply_axis):
tm.assert_frame_equal(result, expected)
+def test_numba_vs_python_string_index():
+ # GH#56189
+ pytest.importorskip("pyarrow")
+ df = DataFrame(
+ 1,
+ index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
+ columns=Index(["x", "y"], dtype="string[pyarrow_numpy]"),
+ )
+ func = lambda x: x
+ result = df.apply(func, engine="numba", axis=0)
+ expected = df.apply(func, engine="python", axis=0)
+ tm.assert_frame_equal(
+ result, expected, check_column_type=False, check_index_type=False
+ )
+
+
def test_numba_vs_python_indexing():
frame = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7.0, 8.0, 9.0]},
@@ -88,7 +104,8 @@ def test_numba_unsupported_dtypes(apply_axis):
df["c"] = df["c"].astype("double[pyarrow]")
with pytest.raises(
- ValueError, match="Column b must have a numeric dtype. Found 'object' instead"
+ ValueError,
+ match="Column b must have a numeric dtype. Found 'object|string' instead",
):
df.apply(f, engine="numba", axis=apply_axis)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56189 | 2023-11-26T20:29:28Z | 2023-11-27T02:52:43Z | 2023-11-27T02:52:43Z | 2023-11-27T11:08:24Z |
Adjust tests in array folder for new string option | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 82de8ae96160f..cfa41a4e1969b 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -932,7 +932,10 @@ def value_counts_internal(
idx = Index(keys)
if idx.dtype == bool and keys.dtype == object:
idx = idx.astype(object)
- elif idx.dtype != keys.dtype:
+ elif (
+ idx.dtype != keys.dtype # noqa: PLR1714 # # pylint: disable=R1714
+ and idx.dtype != "string[pyarrow_numpy]"
+ ):
warnings.warn(
# GH#56161
"The behavior of value_counts with object-dtype is deprecated. "
diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py
index 197e83121567e..0c4fcf149eb20 100644
--- a/pandas/tests/arrays/boolean/test_arithmetic.py
+++ b/pandas/tests/arrays/boolean/test_arithmetic.py
@@ -90,9 +90,16 @@ def test_op_int8(left_array, right_array, opname):
# -----------------------------------------------------------------------------
-def test_error_invalid_values(data, all_arithmetic_operators):
+def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string):
# invalid ops
+ if using_infer_string:
+ import pyarrow as pa
+
+ err = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError)
+ else:
+ err = TypeError
+
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
@@ -110,9 +117,10 @@ def test_error_invalid_values(data, all_arithmetic_operators):
[
r"unsupported operand type\(s\) for",
"Concatenation operation is not implemented for NumPy arrays",
+ "has no kernel",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(err, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
@@ -123,7 +131,9 @@ def test_error_invalid_values(data, all_arithmetic_operators):
r"unsupported operand type\(s\) for",
"can only concatenate str",
"not all arguments converted during string formatting",
+ "has no kernel",
+ "not implemented",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(err, match=msg):
ops(pd.Series("foo", index=s.index))
diff --git a/pandas/tests/arrays/categorical/test_astype.py b/pandas/tests/arrays/categorical/test_astype.py
index 7fba150c9113f..a2a53af6ab1ad 100644
--- a/pandas/tests/arrays/categorical/test_astype.py
+++ b/pandas/tests/arrays/categorical/test_astype.py
@@ -89,7 +89,7 @@ def test_astype(self, ordered):
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
- msg = r"Cannot cast object dtype to float64"
+ msg = r"Cannot cast object|string dtype to float64"
with pytest.raises(ValueError, match=msg):
cat.astype(float)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index e25e31e2f2e9e..50aaa42e09f22 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -6,6 +6,8 @@
import numpy as np
import pytest
+from pandas._config import using_pyarrow_string_dtype
+
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
@@ -447,6 +449,7 @@ def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
+ @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="Can't be NumPy strings")
def test_constructor_np_strs(self):
# GH#31499 Hashtable.map_locations needs to work on np.str_ objects
cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index a1e50917fed98..16b941eab4830 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -92,7 +92,7 @@ def test_comparisons(self, factor):
cat > cat_unordered
# comparison (in both directions) with Series will raise
- s = Series(["b", "b", "b"])
+ s = Series(["b", "b", "b"], dtype=object)
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
@@ -108,7 +108,7 @@ def test_comparisons(self, factor):
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
- a = np.array(["b", "b", "b"])
+ a = np.array(["b", "b", "b"], dtype=object)
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
@@ -248,7 +248,7 @@ def test_comparisons(self, data, reverse, base):
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True)
)
- s = Series(base)
+ s = Series(base, dtype=object if base == list("bbb") else None)
a = np.array(base)
# comparisons need to take categories ordering into account
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index dca171bf81047..d6f93fbbd912f 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -1,9 +1,13 @@
import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
+ Index,
Series,
date_range,
option_context,
@@ -13,11 +17,17 @@
class TestCategoricalReprWithFactor:
- def test_print(self, factor):
- expected = [
- "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
- "Categories (3, object): ['a' < 'b' < 'c']",
- ]
+ def test_print(self, factor, using_infer_string):
+ if using_infer_string:
+ expected = [
+ "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
+ "Categories (3, string): [a < b < c]",
+ ]
+ else:
+ expected = [
+ "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
+ "Categories (3, object): ['a' < 'b' < 'c']",
+ ]
expected = "\n".join(expected)
actual = repr(factor)
assert actual == expected
@@ -26,7 +36,7 @@ def test_print(self, factor):
class TestCategoricalRepr:
def test_big_print(self):
codes = np.array([0, 1, 2, 0, 1, 2] * 100)
- dtype = CategoricalDtype(categories=["a", "b", "c"])
+ dtype = CategoricalDtype(categories=Index(["a", "b", "c"], dtype=object))
factor = Categorical.from_codes(codes, dtype=dtype)
expected = [
"['a', 'b', 'c', 'a', 'b', ..., 'b', 'c', 'a', 'b', 'c']",
@@ -40,13 +50,13 @@ def test_big_print(self):
assert actual == expected
def test_empty_print(self):
- factor = Categorical([], ["a", "b", "c"])
+ factor = Categorical([], Index(["a", "b", "c"], dtype=object))
expected = "[], Categories (3, object): ['a', 'b', 'c']"
actual = repr(factor)
assert actual == expected
assert expected == actual
- factor = Categorical([], ["a", "b", "c"], ordered=True)
+ factor = Categorical([], Index(["a", "b", "c"], dtype=object), ordered=True)
expected = "[], Categories (3, object): ['a' < 'b' < 'c']"
actual = repr(factor)
assert expected == actual
@@ -66,6 +76,10 @@ def test_print_none_width(self):
with option_context("display.width", None):
assert exp == repr(a)
+ @pytest.mark.skipif(
+ using_pyarrow_string_dtype(),
+ reason="Change once infer_string is set to True by default",
+ )
def test_unicode_print(self):
c = Categorical(["aaaaa", "bb", "cccc"] * 20)
expected = """\
diff --git a/pandas/tests/arrays/floating/test_arithmetic.py b/pandas/tests/arrays/floating/test_arithmetic.py
index 056c22d8c1131..ba081bd01062a 100644
--- a/pandas/tests/arrays/floating/test_arithmetic.py
+++ b/pandas/tests/arrays/floating/test_arithmetic.py
@@ -122,11 +122,18 @@ def test_arith_zero_dim_ndarray(other):
# -----------------------------------------------------------------------------
-def test_error_invalid_values(data, all_arithmetic_operators):
+def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
+ if using_infer_string:
+ import pyarrow as pa
+
+ errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError)
+ else:
+ errs = TypeError
+
# invalid scalars
msg = "|".join(
[
@@ -140,15 +147,17 @@ def test_error_invalid_values(data, all_arithmetic_operators):
"ufunc '.*' not supported for the input types, and the inputs could not",
"ufunc '.*' did not contain a loop with signature matching types",
"Concatenation operation is not implemented for NumPy arrays",
+ "has no kernel",
+ "not implemented",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops("foo")
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(pd.Series("foo", index=s.index))
msg = "|".join(
@@ -167,9 +176,11 @@ def test_error_invalid_values(data, all_arithmetic_operators):
),
r"ufunc 'add' cannot use operands with types dtype\('float\d{2}'\)",
"cannot subtract DatetimeArray from ndarray",
+ "has no kernel",
+ "not implemented",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py
index ce6c245cd0f37..d979dd445a61a 100644
--- a/pandas/tests/arrays/integer/test_arithmetic.py
+++ b/pandas/tests/arrays/integer/test_arithmetic.py
@@ -172,11 +172,18 @@ def test_numpy_zero_dim_ndarray(other):
# -----------------------------------------------------------------------------
-def test_error_invalid_values(data, all_arithmetic_operators):
+def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
+ if using_infer_string:
+ import pyarrow as pa
+
+ errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError)
+ else:
+ errs = TypeError
+
# invalid scalars
msg = "|".join(
[
@@ -188,20 +195,26 @@ def test_error_invalid_values(data, all_arithmetic_operators):
"ufunc '.*' not supported for the input types, and the inputs could not",
"ufunc '.*' did not contain a loop with signature matching types",
"Addition/subtraction of integers and integer-arrays with Timestamp",
+ "has no kernel",
+ "not implemented",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops("foo")
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
str_ser = pd.Series("foo", index=s.index)
# with pytest.raises(TypeError, match=msg):
- if all_arithmetic_operators in [
- "__mul__",
- "__rmul__",
- ]: # (data[~data.isna()] >= 0).all():
+ if (
+ all_arithmetic_operators
+ in [
+ "__mul__",
+ "__rmul__",
+ ]
+ and not using_infer_string
+ ): # (data[~data.isna()] >= 0).all():
res = ops(str_ser)
expected = pd.Series(["foo" * x for x in data], index=s.index)
expected = expected.fillna(np.nan)
@@ -210,7 +223,7 @@ def test_error_invalid_values(data, all_arithmetic_operators):
# more-correct than np.nan here.
tm.assert_series_equal(res, expected)
else:
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(str_ser)
msg = "|".join(
@@ -223,9 +236,11 @@ def test_error_invalid_values(data, all_arithmetic_operators):
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"cannot subtract DatetimeArray from ndarray",
+ "has no kernel",
+ "not implemented",
]
)
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(errs, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
diff --git a/pandas/tests/arrays/integer/test_reduction.py b/pandas/tests/arrays/integer/test_reduction.py
index 1c91cd25ba69c..db04862e4ea07 100644
--- a/pandas/tests/arrays/integer/test_reduction.py
+++ b/pandas/tests/arrays/integer/test_reduction.py
@@ -102,7 +102,9 @@ def test_groupby_reductions(op, expected):
["all", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")],
],
)
-def test_mixed_reductions(op, expected):
+def test_mixed_reductions(op, expected, using_infer_string):
+ if op in ["any", "all"] and using_infer_string:
+ expected = expected.astype("bool")
df = DataFrame(
{
"A": ["a", "b", "b"],
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 8dcda44aa68e5..d015e899c4231 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -191,7 +191,7 @@ def test_mul(dtype):
@pytest.mark.xfail(reason="GH-28527")
def test_add_strings(dtype):
arr = pd.array(["a", "b", "c", "d"], dtype=dtype)
- df = pd.DataFrame([["t", "y", "v", "w"]])
+ df = pd.DataFrame([["t", "y", "v", "w"]], dtype=object)
assert arr.__add__(df) is NotImplemented
result = arr + df
@@ -498,10 +498,17 @@ def test_arrow_array(dtype):
@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
-def test_arrow_roundtrip(dtype, string_storage2):
+def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string):
# roundtrip possible from arrow 1.0.0
pa = pytest.importorskip("pyarrow")
+ if using_infer_string and string_storage2 != "pyarrow_numpy":
+ request.applymarker(
+ pytest.mark.xfail(
+ reason="infer_string takes precedence over string storage"
+ )
+ )
+
data = pd.array(["a", "b", None], dtype=dtype)
df = pd.DataFrame({"a": data})
table = pa.table(df)
@@ -516,10 +523,19 @@ def test_arrow_roundtrip(dtype, string_storage2):
@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
-def test_arrow_load_from_zero_chunks(dtype, string_storage2):
+def test_arrow_load_from_zero_chunks(
+ dtype, string_storage2, request, using_infer_string
+):
# GH-41040
pa = pytest.importorskip("pyarrow")
+ if using_infer_string and string_storage2 != "pyarrow_numpy":
+ request.applymarker(
+ pytest.mark.xfail(
+ reason="infer_string takes precedence over string storage"
+ )
+ )
+
data = pd.array([], dtype=dtype)
df = pd.DataFrame({"a": data})
table = pa.table(df)
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index a801a845bc7be..a022dfffbdd2b 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -26,7 +26,9 @@ def test_eq_all_na():
tm.assert_extension_array_equal(result, expected)
-def test_config(string_storage):
+def test_config(string_storage, request, using_infer_string):
+ if using_infer_string and string_storage != "pyarrow_numpy":
+ request.applymarker(pytest.mark.xfail(reason="infer string takes precedence"))
with pd.option_context("string_storage", string_storage):
assert StringDtype().storage == string_storage
result = pd.array(["a", "b"])
@@ -101,7 +103,7 @@ def test_constructor_from_list():
assert result.dtype.storage == "pyarrow"
-def test_from_sequence_wrong_dtype_raises():
+def test_from_sequence_wrong_dtype_raises(using_infer_string):
pytest.importorskip("pyarrow")
with pd.option_context("string_storage", "python"):
ArrowStringArray._from_sequence(["a", None, "c"], dtype="string")
@@ -114,15 +116,19 @@ def test_from_sequence_wrong_dtype_raises():
ArrowStringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]")
- with pytest.raises(AssertionError, match=None):
- with pd.option_context("string_storage", "python"):
- ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
+ if not using_infer_string:
+ with pytest.raises(AssertionError, match=None):
+ with pd.option_context("string_storage", "python"):
+ ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
with pd.option_context("string_storage", "pyarrow"):
ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
- with pytest.raises(AssertionError, match=None):
- ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype("python"))
+ if not using_infer_string:
+ with pytest.raises(AssertionError, match=None):
+ ArrowStringArray._from_sequence(
+ ["a", None, "c"], dtype=StringDtype("python")
+ )
ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype("pyarrow"))
@@ -137,13 +143,15 @@ def test_from_sequence_wrong_dtype_raises():
with pytest.raises(AssertionError, match=None):
StringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]")
- with pd.option_context("string_storage", "python"):
- StringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
-
- with pytest.raises(AssertionError, match=None):
- with pd.option_context("string_storage", "pyarrow"):
+ if not using_infer_string:
+ with pd.option_context("string_storage", "python"):
StringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
+ if not using_infer_string:
+ with pytest.raises(AssertionError, match=None):
+ with pd.option_context("string_storage", "pyarrow"):
+ StringArray._from_sequence(["a", None, "c"], dtype=StringDtype())
+
StringArray._from_sequence(["a", None, "c"], dtype=StringDtype("python"))
with pytest.raises(AssertionError, match=None):
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index e2b8ebcb79a3b..b0ec2787097f0 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -440,7 +440,7 @@ def test_array_unboxes(index_or_series):
def test_array_to_numpy_na():
# GH#40638
- arr = pd.array([pd.NA, 1], dtype="string")
+ arr = pd.array([pd.NA, 1], dtype="string[python]")
result = arr.to_numpy(na_value=True, dtype=bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| sits on top of #56187 | https://api.github.com/repos/pandas-dev/pandas/pulls/56188 | 2023-11-26T20:09:46Z | 2023-12-09T23:35:25Z | 2023-12-09T23:35:25Z | 2023-12-09T23:35:52Z |
BUG: value_counts not preserving object dtype | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 77ce303dc1bfe..41355645fca26 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -26,6 +26,7 @@ Bug fixes
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
+- Fixed bug in :meth:`Series.value_counts` not preserving object dtype when ``infer_string`` is set (:issue:`56187`)
.. ---------------------------------------------------------------------------
.. _whatsnew_214.other:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 82de8ae96160f..8e1b42f531b06 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -871,6 +871,8 @@ def value_counts_internal(
Series,
)
+ input_dtype = None if not isinstance(values, Series) else values.dtype
+
index_name = getattr(values, "name", None)
name = "proportion" if normalize else "count"
@@ -929,7 +931,7 @@ def value_counts_internal(
# For backwards compatibility, we let Index do its normal type
# inference, _except_ for if if infers from object to bool.
- idx = Index(keys)
+ idx = Index(keys, dtype=input_dtype if input_dtype != "float16" else None)
if idx.dtype == bool and keys.dtype == object:
idx = idx.astype(object)
elif idx.dtype != keys.dtype:
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index 859010d9c79c6..422f0fac37f6d 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -269,3 +269,14 @@ def test_value_counts_masked(self):
[2, 1, 1], index=Index([2, 1, 3], dtype=dtype), dtype=dtype, name="count"
)
tm.assert_series_equal(result, expected)
+
+ def test_value_counts_infer_string(self):
+ # GH#56187
+ pytest.importorskip("pyarrow")
+
+ ser = Series(["a", "b"], dtype=object)
+
+ with pd.option_context("future.infer_string", True):
+ result = ser.value_counts()
+ expected = Series([1, 1], index=Index(["a", "b"], dtype=object), name="count")
+ tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56187 | 2023-11-26T20:09:24Z | 2023-12-08T21:57:01Z | null | 2023-12-08T21:57:05Z |
TST/CLN: Remove makeCategoricalIndex | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 7e70db5681850..1716110b619d6 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -6,8 +6,6 @@
import pandas as pd
-from .pandas_vb_common import tm
-
try:
from pandas.api.types import union_categoricals
except ImportError:
@@ -189,7 +187,7 @@ def setup(self):
N = 10**5
ncats = 15
- self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
+ self.s_str = pd.Series(np.random.randint(0, ncats, size=N).astype(str))
self.s_str_cat = pd.Series(self.s_str, dtype="category")
with warnings.catch_warnings(record=True):
str_cat_type = pd.CategoricalDtype(set(self.s_str), ordered=True)
@@ -242,7 +240,7 @@ def time_categorical_series_is_monotonic_decreasing(self):
class Contains:
def setup(self):
N = 10**5
- self.ci = tm.makeCategoricalIndex(N)
+ self.ci = pd.CategoricalIndex(np.arange(N))
self.c = self.ci.values
self.key = self.ci.categories[0]
@@ -325,7 +323,7 @@ def time_sort_values(self):
class SearchSorted:
def setup(self):
N = 10**5
- self.ci = tm.makeCategoricalIndex(N).sort_values()
+ self.ci = pd.CategoricalIndex(np.arange(N)).sort_values()
self.c = self.ci.values
self.key = self.ci.categories[1]
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 14ee29d24800e..0744c6d4cffb4 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -39,7 +39,6 @@
from pandas import (
ArrowDtype,
Categorical,
- CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
@@ -350,36 +349,10 @@ def to_array(obj):
# Others
-def rands_array(
- nchars, size: int, dtype: NpDtype = "O", replace: bool = True
-) -> np.ndarray:
- """
- Generate an array of byte strings.
- """
- chars = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
- retval = (
- np.random.default_rng(2)
- .choice(chars, size=nchars * np.prod(size), replace=replace)
- .view((np.str_, nchars))
- .reshape(size)
- )
- return retval.astype(dtype)
-
-
def getCols(k) -> str:
return string.ascii_uppercase[:k]
-def makeCategoricalIndex(
- k: int = 10, n: int = 3, name=None, **kwargs
-) -> CategoricalIndex:
- """make a length k index or n categories"""
- x = rands_array(nchars=4, size=n, replace=False)
- return CategoricalIndex(
- Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
- )
-
-
def makeNumericIndex(k: int = 10, *, name=None, dtype: Dtype | None) -> Index:
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
@@ -1017,7 +990,6 @@ def shares_memory(left, right) -> bool:
"iat",
"iloc",
"loc",
- "makeCategoricalIndex",
"makeCustomDataframe",
"makeCustomIndex",
"makeDataFrame",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3205b6657439f..d886bf8167dd4 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -59,6 +59,7 @@
import pandas as pd
from pandas import (
+ CategoricalIndex,
DataFrame,
Interval,
IntervalIndex,
@@ -630,7 +631,7 @@ def _create_mi_with_dt64tz_level():
"bool-dtype": Index(np.random.default_rng(2).standard_normal(10) < 0),
"complex64": tm.makeNumericIndex(100, dtype="float64").astype("complex64"),
"complex128": tm.makeNumericIndex(100, dtype="float64").astype("complex128"),
- "categorical": tm.makeCategoricalIndex(100),
+ "categorical": CategoricalIndex(list("abcd") * 25),
"interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)),
"empty": Index([]),
"tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py
index 98113b6c41821..97f3a76311711 100644
--- a/pandas/tests/frame/methods/test_set_index.py
+++ b/pandas/tests/frame/methods/test_set_index.py
@@ -12,6 +12,7 @@
from pandas import (
Categorical,
+ CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
@@ -398,8 +399,7 @@ def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
tm.assert_frame_equal(result, expected)
def test_construction_with_categorical_index(self):
- ci = tm.makeCategoricalIndex(10)
- ci.name = "B"
+ ci = CategoricalIndex(list("ab") * 5, name="B")
# with Categorical
df = DataFrame(
diff --git a/pandas/tests/generic/test_to_xarray.py b/pandas/tests/generic/test_to_xarray.py
index 5fb432f849643..e0d79c3f15282 100644
--- a/pandas/tests/generic/test_to_xarray.py
+++ b/pandas/tests/generic/test_to_xarray.py
@@ -18,14 +18,14 @@ class TestDataFrameToXArray:
def df(self):
return DataFrame(
{
- "a": list("abc"),
- "b": list(range(1, 4)),
- "c": np.arange(3, 6).astype("u1"),
- "d": np.arange(4.0, 7.0, dtype="float64"),
- "e": [True, False, True],
- "f": Categorical(list("abc")),
- "g": date_range("20130101", periods=3),
- "h": date_range("20130101", periods=3, tz="US/Eastern"),
+ "a": list("abcd"),
+ "b": list(range(1, 5)),
+ "c": np.arange(3, 7).astype("u1"),
+ "d": np.arange(4.0, 8.0, dtype="float64"),
+ "e": [True, False, True, False],
+ "f": Categorical(list("abcd")),
+ "g": date_range("20130101", periods=4),
+ "h": date_range("20130101", periods=4, tz="US/Eastern"),
}
)
@@ -37,11 +37,11 @@ def test_to_xarray_index_types(self, index_flat, df, using_infer_string):
from xarray import Dataset
- df.index = index[:3]
+ df.index = index[:4]
df.index.name = "foo"
df.columns.name = "bar"
result = df.to_xarray()
- assert result.dims["foo"] == 3
+ assert result.dims["foo"] == 4
assert len(result.coords) == 1
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
@@ -69,10 +69,10 @@ def test_to_xarray_with_multiindex(self, df, using_infer_string):
from xarray import Dataset
# MultiIndex
- df.index = MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
+ df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
result = df.to_xarray()
assert result.dims["one"] == 1
- assert result.dims["two"] == 3
+ assert result.dims["two"] == 4
assert len(result.coords) == 2
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 7af4f6809ec64..142a00d32815a 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -248,7 +248,7 @@ def test_ensure_copied_data(self):
#
# Must be tested separately from other indexes because
# self.values is not an ndarray.
- index = tm.makeCategoricalIndex(10)
+ index = CategoricalIndex(list("ab") * 5)
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
@@ -261,7 +261,7 @@ def test_ensure_copied_data(self):
class TestCategoricalIndex2:
def test_view_i8(self):
# GH#25464
- ci = tm.makeCategoricalIndex(100)
+ ci = CategoricalIndex(list("ab") * 50)
msg = "When changing to a larger dtype, its size must be a divisor"
with pytest.raises(ValueError, match=msg):
ci.view("i8")
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 9d69321ff7dbb..75b1d370560be 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -68,8 +68,8 @@ def test_tab_completion_with_categorical(self):
@pytest.mark.parametrize(
"index",
[
+ Index(list("ab") * 5, dtype="category"),
Index([str(i) for i in range(10)]),
- tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56186 | 2023-11-26T19:41:03Z | 2023-11-28T17:07:35Z | 2023-11-28T17:07:35Z | 2023-11-28T17:07:38Z |
Backport PR #56152 on branch 2.1.x (BUG: translate losing object dtype with new string dtype) | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index e4f973611c578..eb28e42d303a1 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -25,7 +25,7 @@ Bug fixes
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
--
+- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
.. ---------------------------------------------------------------------------
.. _whatsnew_214.other:
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index b299f5d6deab3..e2a3b9378a4f7 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -259,6 +259,7 @@ def _wrap_result(
fill_value=np.nan,
returns_string: bool = True,
returns_bool: bool = False,
+ dtype=None,
):
from pandas import (
Index,
@@ -379,29 +380,29 @@ def cons_row(x):
out = out.get_level_values(0)
return out
else:
- return Index(result, name=name)
+ return Index(result, name=name, dtype=dtype)
else:
index = self._orig.index
# This is a mess.
- dtype: DtypeObj | str | None
+ _dtype: DtypeObj | str | None = dtype
vdtype = getattr(result, "dtype", None)
if self._is_string:
if is_bool_dtype(vdtype):
- dtype = result.dtype
+ _dtype = result.dtype
elif returns_string:
- dtype = self._orig.dtype
+ _dtype = self._orig.dtype
else:
- dtype = vdtype
- else:
- dtype = vdtype
+ _dtype = vdtype
+ elif vdtype is not None:
+ _dtype = vdtype
if expand:
cons = self._orig._constructor_expanddim
- result = cons(result, columns=name, index=index, dtype=dtype)
+ result = cons(result, columns=name, index=index, dtype=_dtype)
else:
# Must be a Series
cons = self._orig._constructor
- result = cons(result, name=name, index=index, dtype=dtype)
+ result = cons(result, name=name, index=index, dtype=_dtype)
result = result.__finalize__(self._orig, method="str")
if name is not None and result.ndim == 1:
# __finalize__ might copy over the original name, but we may
@@ -2317,7 +2318,8 @@ def translate(self, table):
dtype: object
"""
result = self._data.array._str_translate(table)
- return self._wrap_result(result)
+ dtype = object if self._data.dtype == "object" else None
+ return self._wrap_result(result, dtype=dtype)
@forbid_nonstring_types(["bytes"])
def count(self, pat, flags: int = 0):
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index 78f0730d730e8..bd64a5dce3b9a 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -5,6 +5,7 @@
import pytest
from pandas.errors import PerformanceWarning
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -893,7 +894,10 @@ def test_find_nan(any_string_dtype):
# --------------------------------------------------------------------------------------
-def test_translate(index_or_series, any_string_dtype):
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+def test_translate(index_or_series, any_string_dtype, infer_string):
obj = index_or_series(
["abcdefg", "abcc", "cdddfg", "cdefggg"], dtype=any_string_dtype
)
| Backport PR #56152: BUG: translate losing object dtype with new string dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/56185 | 2023-11-26T18:25:40Z | 2023-11-26T20:11:45Z | 2023-11-26T20:11:45Z | 2023-11-26T20:11:45Z |
Adjust tests in root directory for new string option | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 0cbf211305d12..5f6514946d4a2 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -26,6 +26,7 @@ Bug fixes
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
+- Fixed bug in :meth:`Series.mode` not keeping object dtype when ``infer_string`` is set (:issue:`56183`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 888e8cc4e7d40..1b6fa912b7dc3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2302,7 +2302,11 @@ def mode(self, dropna: bool = True) -> Series:
# Ensure index is type stable (should always use int index)
return self._constructor(
- res_values, index=range(len(res_values)), name=self.name, copy=False
+ res_values,
+ index=range(len(res_values)),
+ name=self.name,
+ copy=False,
+ dtype=self.dtype,
).__finalize__(self, method="mode")
def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation
diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py
index 4bbbcf3bf54c2..76353ab25fca6 100644
--- a/pandas/tests/series/test_reductions.py
+++ b/pandas/tests/series/test_reductions.py
@@ -51,6 +51,16 @@ def test_mode_nullable_dtype(any_numeric_ea_dtype):
tm.assert_series_equal(result, expected)
+def test_mode_infer_string():
+ # GH#56183
+ pytest.importorskip("pyarrow")
+ ser = Series(["a", "b"], dtype=object)
+ with pd.option_context("future.infer_string", True):
+ result = ser.mode()
+ expected = Series(["a", "b"], dtype=object)
+ tm.assert_series_equal(result, expected)
+
+
def test_reductions_td64_with_nat():
# GH#8617
ser = Series([0, pd.NaT], dtype="m8[ns]")
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index e5302ec9833f1..5356704cc64a2 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1946,7 +1946,7 @@ def test_timedelta_mode(self):
tm.assert_series_equal(ser.mode(), exp)
def test_mixed_dtype(self):
- exp = Series(["foo"])
+ exp = Series(["foo"], dtype=object)
ser = Series([1, "foo", "foo"])
tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values)
tm.assert_series_equal(ser.mode(), exp)
| sits on #56183 | https://api.github.com/repos/pandas-dev/pandas/pulls/56184 | 2023-11-26T18:06:06Z | 2023-11-30T17:45:44Z | 2023-11-30T17:45:44Z | 2023-11-30T17:46:31Z |
BUG: mode not preserving object dtype for string option | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 0cbf211305d12..5f6514946d4a2 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -26,6 +26,7 @@ Bug fixes
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
+- Fixed bug in :meth:`Series.mode` not keeping object dtype when ``infer_string`` is set (:issue:`56183`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 888e8cc4e7d40..1b6fa912b7dc3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2302,7 +2302,11 @@ def mode(self, dropna: bool = True) -> Series:
# Ensure index is type stable (should always use int index)
return self._constructor(
- res_values, index=range(len(res_values)), name=self.name, copy=False
+ res_values,
+ index=range(len(res_values)),
+ name=self.name,
+ copy=False,
+ dtype=self.dtype,
).__finalize__(self, method="mode")
def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation
diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py
index 4bbbcf3bf54c2..76353ab25fca6 100644
--- a/pandas/tests/series/test_reductions.py
+++ b/pandas/tests/series/test_reductions.py
@@ -51,6 +51,16 @@ def test_mode_nullable_dtype(any_numeric_ea_dtype):
tm.assert_series_equal(result, expected)
+def test_mode_infer_string():
+ # GH#56183
+ pytest.importorskip("pyarrow")
+ ser = Series(["a", "b"], dtype=object)
+ with pd.option_context("future.infer_string", True):
+ result = ser.mode()
+ expected = Series(["a", "b"], dtype=object)
+ tm.assert_series_equal(result, expected)
+
+
def test_reductions_td64_with_nat():
# GH#8617
ser = Series([0, pd.NaT], dtype="m8[ns]")
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56183 | 2023-11-26T18:04:12Z | 2023-11-30T17:47:59Z | 2023-11-30T17:47:59Z | 2023-11-30T17:48:45Z |
Adjust tests in window folder for new string option | diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 33858e10afd75..fe2da210c6fe9 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -70,7 +70,9 @@ def tests_skip_nuisance(step):
def test_sum_object_str_raises(step):
df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
r = df.rolling(window=3, step=step)
- with pytest.raises(DataError, match="Cannot aggregate non-numeric type: object"):
+ with pytest.raises(
+ DataError, match="Cannot aggregate non-numeric type: object|string"
+ ):
# GH#42738, enforced in 2.0
r.sum()
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 4fd33d54ef846..400bf10817ab8 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -1181,7 +1181,9 @@ def test_pairwise_methods(self, method, expected_data):
)
tm.assert_frame_equal(result, expected)
- expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
+ expected = df.groupby("A")[["B"]].apply(
+ lambda x: getattr(x.ewm(com=1.0), method)()
+ )
tm.assert_frame_equal(result, expected)
def test_times(self, times_frame):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56182 | 2023-11-26T17:55:09Z | 2023-11-27T17:42:24Z | 2023-11-27T17:42:24Z | 2023-11-27T17:42:57Z |
Adjust tests in util folder for new string option | diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index 2d3b47cd2e994..0c93ee453bb1c 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -109,12 +109,16 @@ def test_empty_dtypes(check_dtype):
@pytest.mark.parametrize("check_like", [True, False])
-def test_frame_equal_index_mismatch(check_like, obj_fixture):
+def test_frame_equal_index_mismatch(check_like, obj_fixture, using_infer_string):
+ if using_infer_string:
+ dtype = "string"
+ else:
+ dtype = "object"
msg = f"""{obj_fixture}\\.index are different
{obj_fixture}\\.index values are different \\(33\\.33333 %\\)
-\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='object'\\)
-\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='object'\\)
+\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='{dtype}'\\)
+\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='{dtype}'\\)
At positional index 2, first diff: c != d"""
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
@@ -125,12 +129,16 @@ def test_frame_equal_index_mismatch(check_like, obj_fixture):
@pytest.mark.parametrize("check_like", [True, False])
-def test_frame_equal_columns_mismatch(check_like, obj_fixture):
+def test_frame_equal_columns_mismatch(check_like, obj_fixture, using_infer_string):
+ if using_infer_string:
+ dtype = "string"
+ else:
+ dtype = "object"
msg = f"""{obj_fixture}\\.columns are different
{obj_fixture}\\.columns values are different \\(50\\.0 %\\)
-\\[left\\]: Index\\(\\['A', 'B'\\], dtype='object'\\)
-\\[right\\]: Index\\(\\['A', 'b'\\], dtype='object'\\)"""
+\\[left\\]: Index\\(\\['A', 'B'\\], dtype='{dtype}'\\)
+\\[right\\]: Index\\(\\['A', 'b'\\], dtype='{dtype}'\\)"""
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py
index 15263db9ec645..dc6efdcec380e 100644
--- a/pandas/tests/util/test_assert_index_equal.py
+++ b/pandas/tests/util/test_assert_index_equal.py
@@ -205,14 +205,18 @@ def test_index_equal_names(name1, name2):
tm.assert_index_equal(idx1, idx2)
-def test_index_equal_category_mismatch(check_categorical):
- msg = """Index are different
+def test_index_equal_category_mismatch(check_categorical, using_infer_string):
+ if using_infer_string:
+ dtype = "string"
+ else:
+ dtype = "object"
+ msg = f"""Index are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \
-categories_dtype=object\\)
+categories_dtype={dtype}\\)
\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \
-ordered=False, categories_dtype=object\\)"""
+ordered=False, categories_dtype={dtype}\\)"""
idx1 = Index(Categorical(["a", "b"]))
idx2 = Index(Categorical(["a", "b"], categories=["a", "b", "c"]))
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 12b5987cdb3de..ffc5e105d6f2f 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -214,8 +214,18 @@ def test_series_equal_numeric_values_mismatch(rtol):
tm.assert_series_equal(s1, s2, rtol=rtol)
-def test_series_equal_categorical_values_mismatch(rtol):
- msg = """Series are different
+def test_series_equal_categorical_values_mismatch(rtol, using_infer_string):
+ if using_infer_string:
+ msg = """Series are different
+
+Series values are different \\(66\\.66667 %\\)
+\\[index\\]: \\[0, 1, 2\\]
+\\[left\\]: \\['a', 'b', 'c'\\]
+Categories \\(3, string\\): \\[a, b, c\\]
+\\[right\\]: \\['a', 'c', 'b'\\]
+Categories \\(3, string\\): \\[a, b, c\\]"""
+ else:
+ msg = """Series are different
Series values are different \\(66\\.66667 %\\)
\\[index\\]: \\[0, 1, 2\\]
@@ -246,14 +256,18 @@ def test_series_equal_datetime_values_mismatch(rtol):
tm.assert_series_equal(s1, s2, rtol=rtol)
-def test_series_equal_categorical_mismatch(check_categorical):
- msg = """Attributes of Series are different
+def test_series_equal_categorical_mismatch(check_categorical, using_infer_string):
+ if using_infer_string:
+ dtype = "string"
+ else:
+ dtype = "object"
+ msg = f"""Attributes of Series are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \
-categories_dtype=object\\)
+categories_dtype={dtype}\\)
\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \
-ordered=False, categories_dtype=object\\)"""
+ordered=False, categories_dtype={dtype}\\)"""
s1 = Series(Categorical(["a", "b"]))
s2 = Series(Categorical(["a", "b"], categories=list("abc")))
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56181 | 2023-11-26T17:35:58Z | 2023-11-26T18:36:13Z | 2023-11-26T18:36:13Z | 2023-11-26T19:34:10Z |
Adjust tests in tseries folder for new string option | diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index ee8f161858b2b..75528a8b99c4d 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -431,12 +431,18 @@ def test_series_invalid_type(end):
frequencies.infer_freq(s)
-def test_series_inconvertible_string():
+def test_series_inconvertible_string(using_infer_string):
# see gh-6407
- msg = "Unknown datetime string format"
+ if using_infer_string:
+ msg = "cannot infer freq from"
- with pytest.raises(ValueError, match=msg):
- frequencies.infer_freq(Series(["foo", "bar"]))
+ with pytest.raises(TypeError, match=msg):
+ frequencies.infer_freq(Series(["foo", "bar"]))
+ else:
+ msg = "Unknown datetime string format"
+
+ with pytest.raises(ValueError, match=msg):
+ frequencies.infer_freq(Series(["foo", "bar"]))
@pytest.mark.parametrize("freq", [None, "ms"])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56180 | 2023-11-26T17:28:06Z | 2023-11-26T18:25:44Z | 2023-11-26T18:25:44Z | 2023-11-26T18:25:56Z |
BUG: to_numeric casting to ea for new string dtype | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 0cbf211305d12..332f8da704e38 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -23,6 +23,7 @@ Bug fixes
~~~~~~~~~
- Bug in :class:`Series` constructor raising DeprecationWarning when ``index`` is a list of :class:`Series` (:issue:`55228`)
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
+- Fixed bug in :func:`to_numeric` converting to extension dtype for ``string[pyarrow_numpy]`` dtype (:issue:`56179`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index c5a2736d4f926..09652a7d8bc92 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -234,7 +234,8 @@ def to_numeric(
set(),
coerce_numeric=coerce_numeric,
convert_to_masked_nullable=dtype_backend is not lib.no_default
- or isinstance(values_dtype, StringDtype),
+ or isinstance(values_dtype, StringDtype)
+ and not values_dtype.storage == "pyarrow_numpy",
)
except (ValueError, TypeError):
if errors == "raise":
@@ -249,6 +250,7 @@ def to_numeric(
dtype_backend is not lib.no_default
and new_mask is None
or isinstance(values_dtype, StringDtype)
+ and not values_dtype.storage == "pyarrow_numpy"
):
new_mask = np.zeros(values.shape, dtype=np.bool_)
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index d6b085b7954db..c452382ec572b 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -4,12 +4,15 @@
from numpy import iinfo
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import (
ArrowDtype,
DataFrame,
Index,
Series,
+ option_context,
to_numeric,
)
import pandas._testing as tm
@@ -67,10 +70,14 @@ def test_empty(input_kwargs, result_kwargs):
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
@pytest.mark.parametrize("last_val", ["7", 7])
-def test_series(last_val):
- ser = Series(["1", "-3.14", last_val])
- result = to_numeric(ser)
+def test_series(last_val, infer_string):
+ with option_context("future.infer_string", infer_string):
+ ser = Series(["1", "-3.14", last_val])
+ result = to_numeric(ser)
expected = Series([1, -3.14, 7])
tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56179 | 2023-11-26T17:24:35Z | 2023-11-30T17:46:47Z | 2023-11-30T17:46:47Z | 2023-11-30T17:47:22Z |
Adjust test in tools for new string option | diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 503032293dc81..ee209f74eb4e0 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -183,7 +183,7 @@ def test_to_datetime_format_YYYYMMDD_ignore_with_outofbounds(self, cache):
errors="ignore",
cache=cache,
)
- expected = Index(["15010101", "20150101", np.nan])
+ expected = Index(["15010101", "20150101", np.nan], dtype=object)
tm.assert_index_equal(result, expected)
def test_to_datetime_format_YYYYMMDD_coercion(self, cache):
@@ -1206,7 +1206,9 @@ def test_out_of_bounds_errors_ignore2(self):
# GH#12424
msg = "errors='ignore' is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
- res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore")
+ res = to_datetime(
+ Series(["2362-01-01", np.nan], dtype=object), errors="ignore"
+ )
exp = Series(["2362-01-01", np.nan], dtype=object)
tm.assert_series_equal(res, exp)
@@ -1489,7 +1491,7 @@ def test_datetime_invalid_index(self, values, format):
warn, match="Could not infer format", raise_on_extra_warnings=False
):
res = to_datetime(values, errors="ignore", format=format)
- tm.assert_index_equal(res, Index(values))
+ tm.assert_index_equal(res, Index(values, dtype=object))
with tm.assert_produces_warning(
warn, match="Could not infer format", raise_on_extra_warnings=False
@@ -1667,7 +1669,7 @@ def test_to_datetime_coerce_oob(self, string_arg, format, outofbounds):
"errors, expected",
[
("coerce", Index([NaT, NaT])),
- ("ignore", Index(["200622-12-31", "111111-24-11"])),
+ ("ignore", Index(["200622-12-31", "111111-24-11"], dtype=object)),
],
)
def test_to_datetime_malformed_no_raise(self, errors, expected):
@@ -2681,7 +2683,7 @@ def test_string_na_nat_conversion_malformed(self, cache):
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
- expected = Index(malformed)
+ expected = Index(malformed, dtype=object)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
@@ -3670,7 +3672,7 @@ def test_to_datetime_mixed_not_necessarily_iso8601_raise():
("errors", "expected"),
[
("coerce", DatetimeIndex(["2020-01-01 00:00:00", NaT])),
- ("ignore", Index(["2020-01-01", "01-01-2000"])),
+ ("ignore", Index(["2020-01-01", "01-01-2000"], dtype=object)),
],
)
def test_to_datetime_mixed_not_necessarily_iso8601_coerce(errors, expected):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56178 | 2023-11-26T17:19:34Z | 2023-11-26T18:24:53Z | 2023-11-26T18:24:53Z | 2023-11-26T18:25:02Z |
DOC: Fix broken link for mamba installation | diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 0cc1fe2629e46..7fc42f6021f00 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -86,7 +86,7 @@ Before we begin, please:
Option 1: using mamba (recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* Install `mamba <https://mamba.readthedocs.io/en/latest/installation.html>`_
+* Install `mamba <https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html>`_
* Make sure your mamba is up to date (``mamba update mamba``)
.. code-block:: none
| Prior to this change, this link:
https://pandas.pydata.org/docs/dev/development/contributing_environment.html#contributing-mamba
<img width="1051" alt="image" src="https://github.com/pandas-dev/pandas/assets/122238526/aa20e607-a764-49c2-92fc-e32a7c92fae8">
was broken:

With this change it now goes here:

- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56176 | 2023-11-26T05:04:12Z | 2023-11-26T05:05:52Z | 2023-11-26T05:05:52Z | 2023-11-27T00:18:12Z |
ENH: Allow dictionaries to be passed to pandas.Series.str.replace | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 8b8f5bf3d028c..84b6d12d71165 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -30,6 +30,7 @@ Other enhancements
^^^^^^^^^^^^^^^^^^
- :func:`DataFrame.to_excel` now raises an ``UserWarning`` when the character count in a cell exceeds Excel's limitation of 32767 characters (:issue:`56954`)
- :func:`read_stata` now returns ``datetime64`` resolutions better matching those natively stored in the stata format (:issue:`55642`)
+- Allow dictionaries to be passed to :meth:`pandas.Series.str.replace` via ``pat`` parameter (:issue:`51748`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index bd523969fba13..dc8a71c0122c5 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -1427,8 +1427,8 @@ def fullmatch(self, pat, case: bool = True, flags: int = 0, na=None):
@forbid_nonstring_types(["bytes"])
def replace(
self,
- pat: str | re.Pattern,
- repl: str | Callable,
+ pat: str | re.Pattern | dict,
+ repl: str | Callable | None = None,
n: int = -1,
case: bool | None = None,
flags: int = 0,
@@ -1442,11 +1442,14 @@ def replace(
Parameters
----------
- pat : str or compiled regex
+ pat : str, compiled regex, or a dict
String can be a character sequence or regular expression.
+ Dictionary contains <key : value> pairs of strings to be replaced
+ along with the updated value.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
+ Must have a value of None if `pat` is a dict
See :func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start.
@@ -1480,6 +1483,7 @@ def replace(
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
+ * if `pat` is a dictionary and `repl` is not None.
Notes
-----
@@ -1489,6 +1493,15 @@ def replace(
Examples
--------
+ When `pat` is a dictionary, every key in `pat` is replaced
+ with its corresponding value:
+
+ >>> pd.Series(["A", "B", np.nan]).str.replace(pat={"A": "a", "B": "b"})
+ 0 a
+ 1 b
+ 2 NaN
+ dtype: object
+
When `pat` is a string and `regex` is True, the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
@@ -1551,8 +1564,11 @@ def replace(
2 NaN
dtype: object
"""
+ if isinstance(pat, dict) and repl is not None:
+ raise ValueError("repl cannot be used when pat is a dictionary")
+
# Check whether repl is valid (GH 13438, GH 15055)
- if not (isinstance(repl, str) or callable(repl)):
+ if not isinstance(pat, dict) and not (isinstance(repl, str) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
@@ -1572,10 +1588,17 @@ def replace(
if case is None:
case = True
- result = self._data.array._str_replace(
- pat, repl, n=n, case=case, flags=flags, regex=regex
- )
- return self._wrap_result(result)
+ res_output = self._data
+ if not isinstance(pat, dict):
+ pat = {pat: repl}
+
+ for key, value in pat.items():
+ result = res_output.array._str_replace(
+ key, value, n=n, case=case, flags=flags, regex=regex
+ )
+ res_output = self._wrap_result(result)
+
+ return res_output
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index 9f0994b968a47..f2233a1110059 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -355,6 +355,21 @@ def test_endswith_nullable_string_dtype(nullable_string_dtype, na):
# --------------------------------------------------------------------------------------
# str.replace
# --------------------------------------------------------------------------------------
+def test_replace_dict_invalid(any_string_dtype):
+ # GH 51914
+ series = Series(data=["A", "B_junk", "C_gunk"], name="my_messy_col")
+ msg = "repl cannot be used when pat is a dictionary"
+
+ with pytest.raises(ValueError, match=msg):
+ series.str.replace(pat={"A": "a", "B": "b"}, repl="A")
+
+
+def test_replace_dict(any_string_dtype):
+ # GH 51914
+ series = Series(data=["A", "B", "C"], name="my_messy_col")
+ new_series = series.str.replace(pat={"A": "a", "B": "b"})
+ expected = Series(data=["a", "b", "C"], name="my_messy_col")
+ tm.assert_series_equal(new_series, expected)
def test_replace(any_string_dtype):
| - [X] closes #51748
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56175 | 2023-11-26T01:04:22Z | 2024-02-12T22:27:47Z | 2024-02-12T22:27:47Z | 2024-02-17T17:19:57Z |
CI: Add 3.12 builds | diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 4c38324280528..30397632a0af6 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -26,7 +26,7 @@ jobs:
timeout-minutes: 90
strategy:
matrix:
- env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml]
+ env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml, actions-312.yaml]
# Prevent the include jobs from overriding other jobs
pattern: [""]
include:
@@ -69,6 +69,10 @@ jobs:
env_file: actions-311.yaml
pattern: "not slow and not network and not single_cpu"
pandas_copy_on_write: "1"
+ - name: "Copy-on-Write 3.12"
+ env_file: actions-312.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "1"
- name: "Copy-on-Write 3.11 (warnings)"
env_file: actions-311.yaml
pattern: "not slow and not network and not single_cpu"
@@ -190,7 +194,7 @@ jobs:
strategy:
matrix:
os: [macos-latest, windows-latest]
- env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml]
+ env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml, actions-312.yaml]
fail-fast: false
runs-on: ${{ matrix.os }}
name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }}
@@ -321,7 +325,7 @@ jobs:
# To freeze this file, uncomment out the ``if: false`` condition, and migrate the jobs
# to the corresponding posix/windows-macos/sdist etc. workflows.
# Feel free to modify this comment as necessary.
- #if: false # Uncomment this to freeze the workflow, comment it to unfreeze
+ if: false # Uncomment this to freeze the workflow, comment it to unfreeze
defaults:
run:
shell: bash -eou pipefail {0}
diff --git a/ci/deps/actions-312.yaml b/ci/deps/actions-312.yaml
new file mode 100644
index 0000000000000..394b65525c791
--- /dev/null
+++ b/ci/deps/actions-312.yaml
@@ -0,0 +1,63 @@
+name: pandas-dev-312
+channels:
+ - conda-forge
+dependencies:
+ - python=3.12
+
+ # build dependencies
+ - versioneer[toml]
+ - cython>=0.29.33
+ - meson[ninja]=1.2.1
+ - meson-python=0.13.1
+
+ # test dependencies
+ - pytest>=7.3.2
+ - pytest-cov
+ - pytest-xdist>=2.2.0
+ - pytest-localserver>=0.7.1
+ - pytest-qt>=4.2.0
+ - boto3
+
+ # required dependencies
+ - python-dateutil
+ - numpy<2
+ - pytz
+
+ # optional dependencies
+ - beautifulsoup4>=4.11.2
+ - blosc>=1.21.3
+ - bottleneck>=1.3.6
+ - fastparquet>=2022.12.0
+ - fsspec>=2022.11.0
+ - html5lib>=1.1
+ - hypothesis>=6.46.1
+ - gcsfs>=2022.11.0
+ - jinja2>=3.1.2
+ - lxml>=4.9.2
+ - matplotlib>=3.6.3
+ # - numba>=0.56.4
+ - numexpr>=2.8.4
+ - odfpy>=1.4.1
+ - qtpy>=2.3.0
+ - pyqt>=5.15.9
+ - openpyxl>=3.1.0
+ - psycopg2>=2.9.6
+ - pyarrow>=10.0.1
+ - pymysql>=1.0.2
+ - pyreadstat>=1.2.0
+ # - pytables>=3.8.0
+ # - python-calamine>=0.1.6
+ - pyxlsb>=1.0.10
+ - s3fs>=2022.11.0
+ - scipy>=1.10.0
+ - sqlalchemy>=2.0.0
+ - tabulate>=0.9.0
+ - xarray>=2022.12.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=3.0.5
+ - zstandard>=0.19.0
+
+ - pip:
+ - adbc-driver-postgresql>=0.8.0
+ - adbc-driver-sqlite>=0.8.0
+ - tzdata>=2022.7
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 45215cd3b5e96..304fe824682f9 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -542,6 +542,9 @@ def test_series_pos(self, lhs, engine, parser):
def test_scalar_unary(self, engine, parser):
msg = "bad operand type for unary ~: 'float'"
+ warn = None
+ if PY312 and not (engine == "numexpr" and parser == "pandas"):
+ warn = DeprecationWarning
with pytest.raises(TypeError, match=msg):
pd.eval("~1.0", engine=engine, parser=parser)
@@ -550,8 +553,14 @@ def test_scalar_unary(self, engine, parser):
assert pd.eval("~1", parser=parser, engine=engine) == ~1
assert pd.eval("-1", parser=parser, engine=engine) == -1
assert pd.eval("+1", parser=parser, engine=engine) == +1
- assert pd.eval("~True", parser=parser, engine=engine) == ~True
- assert pd.eval("~False", parser=parser, engine=engine) == ~False
+ with tm.assert_produces_warning(
+ warn, match="Bitwise inversion", check_stacklevel=False
+ ):
+ assert pd.eval("~True", parser=parser, engine=engine) == ~True
+ with tm.assert_produces_warning(
+ warn, match="Bitwise inversion", check_stacklevel=False
+ ):
+ assert pd.eval("~False", parser=parser, engine=engine) == ~False
assert pd.eval("-True", parser=parser, engine=engine) == -True
assert pd.eval("-False", parser=parser, engine=engine) == -False
assert pd.eval("+True", parser=parser, engine=engine) == +True
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 8298d39a5eca9..7131a50956a7d 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -34,6 +34,7 @@
from pandas._libs.tslibs import timezones
from pandas.compat import (
PY311,
+ PY312,
is_ci_environment,
is_platform_windows,
pa_version_under11p0,
@@ -716,7 +717,13 @@ def test_invert(self, data, request):
reason=f"pyarrow.compute.invert does support {pa_dtype}",
)
)
- super().test_invert(data)
+ if PY312 and pa.types.is_boolean(pa_dtype):
+ with tm.assert_produces_warning(
+ DeprecationWarning, match="Bitwise inversion", check_stacklevel=False
+ ):
+ super().test_invert(data)
+ else:
+ super().test_invert(data)
@pytest.mark.parametrize("periods", [1, -2])
def test_diff(self, data, periods, request):
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index caa2da1b6123b..98d82f10375b4 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1442,7 +1442,9 @@ def test_deprecate_bytes_input(self, engine, read_ext):
"byte string, wrap it in a `BytesIO` object."
)
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ with tm.assert_produces_warning(
+ FutureWarning, match=msg, raise_on_extra_warnings=False
+ ):
with open("test1" + read_ext, "rb") as f:
pd.read_excel(f.read(), engine=engine)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 9aeac58de50bb..6576c98042333 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -287,7 +287,9 @@ def test_read_excel_parse_dates(self, ext):
date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y")
with tm.assert_produces_warning(
- FutureWarning, match="use 'date_format' instead"
+ FutureWarning,
+ match="use 'date_format' instead",
+ raise_on_extra_warnings=False,
):
res = pd.read_excel(
pth,
| Some CoW warnings broke for 3.11, so we should test at least CoW for 3.12 as well | https://api.github.com/repos/pandas-dev/pandas/pulls/56174 | 2023-11-26T00:08:56Z | 2023-11-30T14:38:02Z | 2023-11-30T14:38:02Z | 2023-12-04T23:29:19Z |
CI: Add CoW builds and fix inplace warnings | diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 33b6e7a8c2340..4c38324280528 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -73,6 +73,14 @@ jobs:
env_file: actions-311.yaml
pattern: "not slow and not network and not single_cpu"
pandas_copy_on_write: "warn"
+ - name: "Copy-on-Write 3.10 (warnings)"
+ env_file: actions-310.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "warn"
+ - name: "Copy-on-Write 3.9 (warnings)"
+ env_file: actions-39.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "warn"
- name: "Pypy"
env_file: actions-pypy-39.yaml
pattern: "not slow and not network and not single_cpu"
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e4e95a973a3c1..c832d9ca257f9 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -12450,7 +12450,7 @@ def _inplace_method(self, other, op) -> Self:
"""
warn = True
if not PYPY and warn_copy_on_write():
- if sys.getrefcount(self) <= 4:
+ if sys.getrefcount(self) <= REF_COUNT + 2:
# we are probably in an inplace setitem context (e.g. df['a'] += 1)
warn = False
| related to #56172, want to check that the other pr fails and this fixes the warnings | https://api.github.com/repos/pandas-dev/pandas/pulls/56173 | 2023-11-26T00:05:53Z | 2023-11-27T11:26:40Z | 2023-11-27T11:26:40Z | 2023-11-27T11:28:50Z |
CI: Add CoW warning builds | diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 33b6e7a8c2340..4c38324280528 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -73,6 +73,14 @@ jobs:
env_file: actions-311.yaml
pattern: "not slow and not network and not single_cpu"
pandas_copy_on_write: "warn"
+ - name: "Copy-on-Write 3.10 (warnings)"
+ env_file: actions-310.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "warn"
+ - name: "Copy-on-Write 3.9 (warnings)"
+ env_file: actions-39.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "warn"
- name: "Pypy"
env_file: actions-pypy-39.yaml
pattern: "not slow and not network and not single_cpu"
| I am on 3.10 and getting a bunch of failures locally, since we have this reference tracking issues between different python versions I would rather test those in ci | https://api.github.com/repos/pandas-dev/pandas/pulls/56172 | 2023-11-26T00:00:54Z | 2023-11-27T11:26:55Z | null | 2023-11-27T11:29:03Z |
CoW: Fix warnings for eval | diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 0c99e5e7bdc54..f1fe528de06f8 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -388,17 +388,10 @@ def eval(
# we will ignore numpy warnings here; e.g. if trying
# to use a non-numeric indexer
try:
- with warnings.catch_warnings(record=True):
- warnings.filterwarnings(
- "always", "Setting a value on a view", FutureWarning
- )
- # TODO: Filter the warnings we actually care about here.
- if inplace and isinstance(target, NDFrame):
- target.loc[:, assigner] = ret
- else:
- target[ # pyright: ignore[reportGeneralTypeIssues]
- assigner
- ] = ret
+ if inplace and isinstance(target, NDFrame):
+ target.loc[:, assigner] = ret
+ else:
+ target[assigner] = ret # pyright: ignore[reportGeneralTypeIssues]
except (TypeError, IndexError) as err:
raise ValueError("Cannot assign expression output to target") from err
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 376354aedea63..eaae515c4d7d5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -650,12 +650,17 @@ def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
+ from pandas.core.series import Series
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
- clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
+ clean_column_name(k): Series(
+ v, copy=False, index=self.index, name=k
+ ).__finalize__(self)
+ for k, v in zip(self.columns, self._iter_column_arrays())
+ if not isinstance(k, int)
}
@final
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 304fe824682f9..75473b8c50f4e 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -1173,9 +1173,7 @@ def test_assignment_single_assign_new(self):
df.eval("c = a + b", inplace=True)
tm.assert_frame_equal(df, expected)
- # TODO(CoW-warn) this should not warn (DataFrame.eval creates refs to self)
- @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
- def test_assignment_single_assign_local_overlap(self, warn_copy_on_write):
+ def test_assignment_single_assign_local_overlap(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
@@ -1229,8 +1227,6 @@ def test_column_in(self):
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.")
- # TODO(CoW-warn) this should not warn (DataFrame.eval creates refs to self)
- @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
def test_assignment_not_inplace(self):
# see gh-9297
df = DataFrame(
@@ -1244,7 +1240,7 @@ def test_assignment_not_inplace(self):
expected["c"] = expected["a"] + expected["b"]
tm.assert_frame_equal(df, expected)
- def test_multi_line_expression(self):
+ def test_multi_line_expression(self, warn_copy_on_write):
# GH 11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
@@ -1917,8 +1913,8 @@ def test_set_inplace(using_copy_on_write, warn_copy_on_write):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
result_view = df[:]
ser = df["A"]
- # with tm.assert_cow_warning(warn_copy_on_write):
- df.eval("A = B + C", inplace=True)
+ with tm.assert_cow_warning(warn_copy_on_write):
+ df.eval("A = B + C", inplace=True)
expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]})
tm.assert_frame_equal(df, expected)
if not using_copy_on_write:
This is an option if eval can't operate inplace on numpy arrays under the hood
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56170 | 2023-11-25T23:19:06Z | 2023-12-04T10:55:57Z | 2023-12-04T10:55:57Z | 2023-12-04T11:04:53Z |
CoW: Remove todos that aren't necessary | diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 9f5157181843e..53de64c72674b 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -152,7 +152,7 @@ def test_transform_axis_1_raises():
# TODO(CoW-warn) should not need to warn
@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
-def test_apply_modify_traceback():
+def test_apply_modify_traceback(warn_copy_on_write):
data = DataFrame(
{
"A": [
@@ -214,7 +214,8 @@ def transform2(row):
msg = "'float' object has no attribute 'startswith'"
with pytest.raises(AttributeError, match=msg):
- data.apply(transform, axis=1)
+ with tm.assert_cow_warning(warn_copy_on_write):
+ data.apply(transform, axis=1)
@pytest.mark.parametrize(
diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py
index 89384a4ef6ba6..7d5c485958039 100644
--- a/pandas/tests/copy_view/test_constructors.py
+++ b/pandas/tests/copy_view/test_constructors.py
@@ -297,7 +297,6 @@ def test_dataframe_from_series_or_index(
if using_copy_on_write:
assert not df._mgr._has_no_reference(0)
- # TODO(CoW-warn) should not warn for an index?
with tm.assert_cow_warning(warn_copy_on_write):
df.iloc[0, 0] = data[-1]
if using_copy_on_write:
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 2e623f885b648..72b7aea3709c0 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -913,7 +913,6 @@ def test_del_frame(backend, using_copy_on_write, warn_copy_on_write):
tm.assert_frame_equal(df2, df_orig[["a", "c"]])
df2._mgr._verify_integrity()
- # TODO(CoW-warn) false positive, this should not warn?
with tm.assert_cow_warning(warn_copy_on_write and dtype_backend == "numpy"):
df.loc[0, "b"] = 200
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
| xref https://github.com/pandas-dev/pandas/issues/56019
This should warn, since row is a Series and the setitem is then inplace
The Index case is a bug at the moment, creating the df is zero copy, updating the df afterwards will propagate to the index, but will be fixed with CoW
In ``test_del_frame``, the parent is a single block; we are viewing the same block in df2 even though we deleted column "b"
CoW: Warn for cases that go through putmask | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ced930b936ba5..87c0df7164967 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10496,6 +10496,7 @@ def _where(
inplace: bool_t = False,
axis: Axis | None = None,
level=None,
+ warn: bool_t = True,
):
"""
Equivalent to public method `where`, except that `other` is not
@@ -10626,7 +10627,7 @@ def _where(
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
- new_data = self._mgr.putmask(mask=cond, new=other, align=align)
+ new_data = self._mgr.putmask(mask=cond, new=other, align=align, warn=warn)
result = self._constructor_from_mgr(new_data, axes=new_data.axes)
return self._update_inplace(result)
diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py
index fe366c7375c6a..664856b828347 100644
--- a/pandas/core/internals/base.py
+++ b/pandas/core/internals/base.py
@@ -14,7 +14,10 @@
import numpy as np
-from pandas._config import using_copy_on_write
+from pandas._config import (
+ using_copy_on_write,
+ warn_copy_on_write,
+)
from pandas._libs import (
algos as libalgos,
@@ -49,6 +52,16 @@
)
+class _AlreadyWarned:
+ def __init__(self):
+ # This class is used on the manager level to the block level to
+ # ensure that we warn only once. The block method can update the
+ # warned_already option without returning a value to keep the
+ # interface consistent. This is only a temporary solution for
+ # CoW warnings.
+ self.warned_already = False
+
+
class DataManager(PandasObject):
# TODO share more methods/attributes
@@ -196,19 +209,26 @@ def where(self, other, cond, align: bool) -> Self:
)
@final
- def putmask(self, mask, new, align: bool = True) -> Self:
+ def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self:
if align:
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
new = extract_array(new, extract_numpy=True)
+ already_warned = None
+ if warn_copy_on_write():
+ already_warned = _AlreadyWarned()
+ if not warn:
+ already_warned.warned_already = True
+
return self.apply_with_block(
"putmask",
align_keys=align_keys,
mask=mask,
new=new,
using_cow=using_copy_on_write(),
+ already_warned=already_warned,
)
@final
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 535d18f99f0ef..a06d266870edc 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -18,6 +18,7 @@
from pandas._config import (
get_option,
using_copy_on_write,
+ warn_copy_on_write,
)
from pandas._libs import (
@@ -136,6 +137,29 @@
_dtype_obj = np.dtype("object")
+COW_WARNING_GENERAL_MSG = """\
+Setting a value on a view: behaviour will change in pandas 3.0.
+You are mutating a Series or DataFrame object, and currently this mutation will
+also have effect on other Series or DataFrame objects that share data with this
+object. In pandas 3.0 (with Copy-on-Write), updating one Series or DataFrame object
+will never modify another.
+"""
+
+
+COW_WARNING_SETITEM_MSG = """\
+Setting a value on a view: behaviour will change in pandas 3.0.
+Currently, the mutation will also have effect on the object that shares data
+with this object. For example, when setting a value in a Series that was
+extracted from a column of a DataFrame, that DataFrame will also be updated:
+
+ ser = df["col"]
+ ser[0] = 0 <--- in pandas 2, this also updates `df`
+
+In pandas 3.0 (with Copy-on-Write), updating one Series/DataFrame will never
+modify another, and thus in the example above, `df` will not be changed.
+"""
+
+
def maybe_split(meth: F) -> F:
"""
If we have a multi-column block, split and operate block-wise. Otherwise
@@ -1355,7 +1379,9 @@ def setitem(self, indexer, value, using_cow: bool = False) -> Block:
values[indexer] = casted
return self
- def putmask(self, mask, new, using_cow: bool = False) -> list[Block]:
+ def putmask(
+ self, mask, new, using_cow: bool = False, already_warned=None
+ ) -> list[Block]:
"""
putmask the data to the block; it is possible that we may create a
new dtype of block
@@ -1388,6 +1414,19 @@ def putmask(self, mask, new, using_cow: bool = False) -> list[Block]:
return [self.copy(deep=False)]
return [self]
+ if (
+ warn_copy_on_write()
+ and already_warned is not None
+ and not already_warned.warned_already
+ ):
+ if self.refs.has_reference():
+ warnings.warn(
+ COW_WARNING_GENERAL_MSG,
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ already_warned.warned_already = True
+
try:
casted = np_can_hold_element(values.dtype, new)
@@ -2020,7 +2059,9 @@ def where(
return [nb]
@final
- def putmask(self, mask, new, using_cow: bool = False) -> list[Block]:
+ def putmask(
+ self, mask, new, using_cow: bool = False, already_warned=None
+ ) -> list[Block]:
"""
See Block.putmask.__doc__
"""
@@ -2038,6 +2079,19 @@ def putmask(self, mask, new, using_cow: bool = False) -> list[Block]:
return [self.copy(deep=False)]
return [self]
+ if (
+ warn_copy_on_write()
+ and already_warned is not None
+ and not already_warned.warned_already
+ ):
+ if self.refs.has_reference():
+ warnings.warn(
+ COW_WARNING_GENERAL_MSG,
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ already_warned.warned_already = True
+
self = self._maybe_copy(using_cow, inplace=True)
values = self.values
if values.ndim == 2:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c11150eb4c4d7..a02f31d4483b2 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -72,6 +72,8 @@
interleaved_dtype,
)
from pandas.core.internals.blocks import (
+ COW_WARNING_GENERAL_MSG,
+ COW_WARNING_SETITEM_MSG,
Block,
NumpyBlock,
ensure_block_shape,
@@ -100,29 +102,6 @@
from pandas.api.extensions import ExtensionArray
-COW_WARNING_GENERAL_MSG = """\
-Setting a value on a view: behaviour will change in pandas 3.0.
-You are mutating a Series or DataFrame object, and currently this mutation will
-also have effect on other Series or DataFrame objects that share data with this
-object. In pandas 3.0 (with Copy-on-Write), updating one Series or DataFrame object
-will never modify another.
-"""
-
-
-COW_WARNING_SETITEM_MSG = """\
-Setting a value on a view: behaviour will change in pandas 3.0.
-Currently, the mutation will also have effect on the object that shares data
-with this object. For example, when setting a value in a Series that was
-extracted from a column of a DataFrame, that DataFrame will also be updated:
-
- ser = df["col"]
- ser[0] = 0 <--- in pandas 2, this also updates `df`
-
-In pandas 3.0 (with Copy-on-Write), updating one Series/DataFrame will never
-modify another, and thus in the example above, `df` will not be changed.
-"""
-
-
class BaseBlockManager(DataManager):
"""
Core internal data structure to implement DataFrame, Series, etc.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1b6fa912b7dc3..b060645d735c6 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1320,7 +1320,7 @@ def __setitem__(self, key, value) -> None:
# otherwise with listlike other we interpret series[mask] = other
# as series[mask] = other[mask]
try:
- self._where(~key, value, inplace=True)
+ self._where(~key, value, inplace=True, warn=warn)
except InvalidIndexError:
# test_where_dups
self.iloc[key] = value
diff --git a/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
index 7b08d9b80fc9b..5c16c7e18b89f 100644
--- a/pandas/tests/copy_view/test_chained_assignment_deprecation.py
+++ b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
@@ -70,7 +70,7 @@ def test_methods_iloc_getitem_item_cache(func, args, using_copy_on_write):
@pytest.mark.parametrize(
"indexer", [0, [0, 1], slice(0, 2), np.array([True, False, True])]
)
-def test_series_setitem(indexer, using_copy_on_write):
+def test_series_setitem(indexer, using_copy_on_write, warn_copy_on_write):
# ensure we only get a single warning for those typical cases of chained
# assignment
df = DataFrame({"a": [1, 2, 3], "b": 1})
diff --git a/pandas/tests/copy_view/test_clip.py b/pandas/tests/copy_view/test_clip.py
index 13ddd479c7c67..7ed6a1f803ead 100644
--- a/pandas/tests/copy_view/test_clip.py
+++ b/pandas/tests/copy_view/test_clip.py
@@ -8,12 +8,16 @@
from pandas.tests.copy_view.util import get_array
-def test_clip_inplace_reference(using_copy_on_write):
+def test_clip_inplace_reference(using_copy_on_write, warn_copy_on_write):
df = DataFrame({"a": [1.5, 2, 3]})
df_copy = df.copy()
arr_a = get_array(df, "a")
view = df[:]
- df.clip(lower=2, inplace=True)
+ if warn_copy_on_write:
+ with tm.assert_cow_warning():
+ df.clip(lower=2, inplace=True)
+ else:
+ df.clip(lower=2, inplace=True)
if using_copy_on_write:
assert not np.shares_memory(get_array(df, "a"), arr_a)
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 72b7aea3709c0..355eb2db0ef09 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -367,10 +367,11 @@ def test_subset_set_with_mask(backend, using_copy_on_write, warn_copy_on_write):
mask = subset > 3
- # TODO(CoW-warn) should warn -> mask is a DataFrame, which ends up going through
- # DataFrame._where(..., inplace=True)
- if using_copy_on_write or warn_copy_on_write:
+ if using_copy_on_write:
subset[mask] = 0
+ elif warn_copy_on_write:
+ with tm.assert_cow_warning():
+ subset[mask] = 0
else:
with pd.option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(SettingWithCopyWarning):
@@ -867,18 +868,8 @@ def test_series_subset_set_with_indexer(
and indexer.dtype.kind == "i"
):
warn = FutureWarning
- is_mask = (
- indexer_si is tm.setitem
- and isinstance(indexer, np.ndarray)
- and indexer.dtype.kind == "b"
- )
if warn_copy_on_write:
- # TODO(CoW-warn) should also warn for setting with mask
- # -> Series.__setitem__ with boolean mask ends up using Series._set_values
- # or Series._where depending on value being set
- with tm.assert_cow_warning(
- not is_mask, raise_on_extra_warnings=warn is not None
- ):
+ with tm.assert_cow_warning(raise_on_extra_warnings=warn is not None):
indexer_si(subset)[indexer] = 0
else:
with tm.assert_produces_warning(warn, match=msg):
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 806842dcab57a..ba8e4bd684198 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1407,11 +1407,12 @@ def test_items(using_copy_on_write, warn_copy_on_write):
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
-def test_putmask(using_copy_on_write, dtype):
+def test_putmask(using_copy_on_write, dtype, warn_copy_on_write):
df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
view = df[:]
df_orig = df.copy()
- df[df == df] = 5
+ with tm.assert_cow_warning(warn_copy_on_write):
+ df[df == df] = 5
if using_copy_on_write:
assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))
@@ -1445,15 +1446,21 @@ def test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype):
@pytest.mark.parametrize(
"val, exp, warn", [(5.5, True, FutureWarning), (5, False, None)]
)
-def test_putmask_dont_copy_some_blocks(using_copy_on_write, val, exp, warn):
+def test_putmask_dont_copy_some_blocks(
+ using_copy_on_write, val, exp, warn, warn_copy_on_write
+):
df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5})
view = df[:]
df_orig = df.copy()
indexer = DataFrame(
[[True, False, False], [True, False, False]], columns=list("abc")
)
- with tm.assert_produces_warning(warn, match="incompatible dtype"):
- df[indexer] = val
+ if warn_copy_on_write:
+ with tm.assert_cow_warning():
+ df[indexer] = val
+ else:
+ with tm.assert_produces_warning(warn, match="incompatible dtype"):
+ df[indexer] = val
if using_copy_on_write:
assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))
@@ -1796,13 +1803,17 @@ def test_update_frame(using_copy_on_write, warn_copy_on_write):
tm.assert_frame_equal(view, expected)
-def test_update_series(using_copy_on_write):
+def test_update_series(using_copy_on_write, warn_copy_on_write):
ser1 = Series([1.0, 2.0, 3.0])
ser2 = Series([100.0], index=[1])
ser1_orig = ser1.copy()
view = ser1[:]
- ser1.update(ser2)
+ if warn_copy_on_write:
+ with tm.assert_cow_warning():
+ ser1.update(ser2)
+ else:
+ ser1.update(ser2)
expected = Series([1.0, 100.0, 3.0])
tm.assert_series_equal(ser1, expected)
diff --git a/pandas/tests/copy_view/test_replace.py b/pandas/tests/copy_view/test_replace.py
index d11a2893becdc..3d8559a1905fc 100644
--- a/pandas/tests/copy_view/test_replace.py
+++ b/pandas/tests/copy_view/test_replace.py
@@ -279,14 +279,18 @@ def test_replace_categorical(using_copy_on_write, val):
@pytest.mark.parametrize("method", ["where", "mask"])
-def test_masking_inplace(using_copy_on_write, method):
+def test_masking_inplace(using_copy_on_write, method, warn_copy_on_write):
df = DataFrame({"a": [1.5, 2, 3]})
df_orig = df.copy()
arr_a = get_array(df, "a")
view = df[:]
method = getattr(df, method)
- method(df["a"] > 1.6, -1, inplace=True)
+ if warn_copy_on_write:
+ with tm.assert_cow_warning():
+ method(df["a"] > 1.6, -1, inplace=True)
+ else:
+ method(df["a"] > 1.6, -1, inplace=True)
if using_copy_on_write:
assert not np.shares_memory(get_array(df, "a"), arr_a)
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 4b32d3de59ca2..13e2c1a249ac2 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -729,11 +729,7 @@ def test_replace_for_new_dtypes(self, datetime_frame):
tsframe.loc[tsframe.index[:5], "A"] = np.nan
tsframe.loc[tsframe.index[-5:], "A"] = np.nan
- tsframe.loc[tsframe.index[:5], "B"] = -1e8
-
- b = tsframe["B"]
- b[b == -1e8] = np.nan
- tsframe["B"] = b
+ tsframe.loc[tsframe.index[:5], "B"] = np.nan
msg = "DataFrame.fillna with 'method' is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# TODO: what is this even testing?
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Open to suggestions on how we can improve the putmask mechanism to avoid duplicate warnings
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56168 | 2023-11-25T22:32:57Z | 2023-12-04T10:08:26Z | 2023-12-04T10:08:26Z | 2023-12-04T11:05:56Z |
[ENH]: Expand types allowed in Series.struct.field | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index d1481639ca5a0..39361c3505e61 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -251,6 +251,14 @@ DataFrame. (:issue:`54938`)
)
series.struct.explode()
+Use :meth:`Series.struct.field` to index into a (possible nested)
+struct field.
+
+
+.. ipython:: python
+
+ series.struct.field("project")
+
.. _whatsnew_220.enhancements.list_accessor:
Series.list accessor for PyArrow list data
diff --git a/pandas/core/arrays/arrow/accessors.py b/pandas/core/arrays/arrow/accessors.py
index 7f88267943526..124f8fb6ad8bc 100644
--- a/pandas/core/arrays/arrow/accessors.py
+++ b/pandas/core/arrays/arrow/accessors.py
@@ -6,13 +6,18 @@
ABCMeta,
abstractmethod,
)
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ cast,
+)
from pandas.compat import (
pa_version_under10p1,
pa_version_under11p0,
)
+from pandas.core.dtypes.common import is_list_like
+
if not pa_version_under10p1:
import pyarrow as pa
import pyarrow.compute as pc
@@ -267,15 +272,27 @@ def dtypes(self) -> Series:
names = [struct.name for struct in pa_type]
return Series(types, index=Index(names))
- def field(self, name_or_index: str | int) -> Series:
+ def field(
+ self,
+ name_or_index: list[str]
+ | list[bytes]
+ | list[int]
+ | pc.Expression
+ | bytes
+ | str
+ | int,
+ ) -> Series:
"""
Extract a child field of a struct as a Series.
Parameters
----------
- name_or_index : str | int
+ name_or_index : str | bytes | int | expression | list
Name or index of the child field to extract.
+ For list-like inputs, this will index into a nested
+ struct.
+
Returns
-------
pandas.Series
@@ -285,6 +302,19 @@ def field(self, name_or_index: str | int) -> Series:
--------
Series.struct.explode : Return all child fields as a DataFrame.
+ Notes
+ -----
+ The name of the resulting Series will be set using the following
+ rules:
+
+ - For string, bytes, or integer `name_or_index` (or a list of these, for
+ a nested selection), the Series name is set to the selected
+ field's name.
+ - For a :class:`pyarrow.compute.Expression`, this is set to
+ the string form of the expression.
+ - For list-like `name_or_index`, the name will be set to the
+ name of the final field selected.
+
Examples
--------
>>> import pyarrow as pa
@@ -314,27 +344,92 @@ def field(self, name_or_index: str | int) -> Series:
1 2
2 1
Name: version, dtype: int64[pyarrow]
+
+ Or an expression
+
+ >>> import pyarrow.compute as pc
+ >>> s.struct.field(pc.field("project"))
+ 0 pandas
+ 1 pandas
+ 2 numpy
+ Name: project, dtype: string[pyarrow]
+
+ For nested struct types, you can pass a list of values to index
+ multiple levels:
+
+ >>> version_type = pa.struct([
+ ... ("major", pa.int64()),
+ ... ("minor", pa.int64()),
+ ... ])
+ >>> s = pd.Series(
+ ... [
+ ... {"version": {"major": 1, "minor": 5}, "project": "pandas"},
+ ... {"version": {"major": 2, "minor": 1}, "project": "pandas"},
+ ... {"version": {"major": 1, "minor": 26}, "project": "numpy"},
+ ... ],
+ ... dtype=pd.ArrowDtype(pa.struct(
+ ... [("version", version_type), ("project", pa.string())]
+ ... ))
+ ... )
+ >>> s.struct.field(["version", "minor"])
+ 0 5
+ 1 1
+ 2 26
+ Name: minor, dtype: int64[pyarrow]
+ >>> s.struct.field([0, 0])
+ 0 1
+ 1 2
+ 2 1
+ Name: major, dtype: int64[pyarrow]
"""
from pandas import Series
+ def get_name(
+ level_name_or_index: list[str]
+ | list[bytes]
+ | list[int]
+ | pc.Expression
+ | bytes
+ | str
+ | int,
+ data: pa.ChunkedArray,
+ ):
+ if isinstance(level_name_or_index, int):
+ name = data.type.field(level_name_or_index).name
+ elif isinstance(level_name_or_index, (str, bytes)):
+ name = level_name_or_index
+ elif isinstance(level_name_or_index, pc.Expression):
+ name = str(level_name_or_index)
+ elif is_list_like(level_name_or_index):
+ # For nested input like [2, 1, 2]
+ # iteratively get the struct and field name. The last
+ # one is used for the name of the index.
+ level_name_or_index = list(reversed(level_name_or_index))
+ selected = data
+ while level_name_or_index:
+ # we need the cast, otherwise mypy complains about
+ # getting ints, bytes, or str here, which isn't possible.
+ level_name_or_index = cast(list, level_name_or_index)
+ name_or_index = level_name_or_index.pop()
+ name = get_name(name_or_index, selected)
+ selected = selected.type.field(selected.type.get_field_index(name))
+ name = selected.name
+ else:
+ raise ValueError(
+ "name_or_index must be an int, str, bytes, "
+ "pyarrow.compute.Expression, or list of those"
+ )
+ return name
+
pa_arr = self._data.array._pa_array
- if isinstance(name_or_index, int):
- index = name_or_index
- elif isinstance(name_or_index, str):
- index = pa_arr.type.get_field_index(name_or_index)
- else:
- raise ValueError(
- "name_or_index must be an int or str, "
- f"got {type(name_or_index).__name__}"
- )
+ name = get_name(name_or_index, pa_arr)
+ field_arr = pc.struct_field(pa_arr, name_or_index)
- pa_field = pa_arr.type[index]
- field_arr = pc.struct_field(pa_arr, [index])
return Series(
field_arr,
dtype=ArrowDtype(field_arr.type),
index=self._data.index,
- name=pa_field.name,
+ name=name,
)
def explode(self) -> DataFrame:
diff --git a/pandas/tests/series/accessors/test_struct_accessor.py b/pandas/tests/series/accessors/test_struct_accessor.py
index 1ec5b3b726d17..80aea75fda406 100644
--- a/pandas/tests/series/accessors/test_struct_accessor.py
+++ b/pandas/tests/series/accessors/test_struct_accessor.py
@@ -2,6 +2,11 @@
import pytest
+from pandas.compat.pyarrow import (
+ pa_version_under11p0,
+ pa_version_under13p0,
+)
+
from pandas import (
ArrowDtype,
DataFrame,
@@ -11,6 +16,7 @@
import pandas._testing as tm
pa = pytest.importorskip("pyarrow")
+pc = pytest.importorskip("pyarrow.compute")
def test_struct_accessor_dtypes():
@@ -53,6 +59,7 @@ def test_struct_accessor_dtypes():
tm.assert_series_equal(actual, expected)
+@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
def test_struct_accessor_field():
index = Index([-100, 42, 123])
ser = Series(
@@ -94,10 +101,11 @@ def test_struct_accessor_field():
def test_struct_accessor_field_with_invalid_name_or_index():
ser = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())])))
- with pytest.raises(ValueError, match="name_or_index must be an int or str"):
+ with pytest.raises(ValueError, match="name_or_index must be an int, str,"):
ser.struct.field(1.1)
+@pytest.mark.skipif(pa_version_under11p0, reason="pyarrow>=11.0.0 required")
def test_struct_accessor_explode():
index = Index([-100, 42, 123])
ser = Series(
@@ -148,3 +156,41 @@ def test_struct_accessor_api_for_invalid(invalid):
),
):
invalid.struct
+
+
+@pytest.mark.parametrize(
+ ["indices", "name"],
+ [
+ (0, "int_col"),
+ ([1, 2], "str_col"),
+ (pc.field("int_col"), "int_col"),
+ ("int_col", "int_col"),
+ (b"string_col", b"string_col"),
+ ([b"string_col"], "string_col"),
+ ],
+)
+@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
+def test_struct_accessor_field_expanded(indices, name):
+ arrow_type = pa.struct(
+ [
+ ("int_col", pa.int64()),
+ (
+ "struct_col",
+ pa.struct(
+ [
+ ("int_col", pa.int64()),
+ ("float_col", pa.float64()),
+ ("str_col", pa.string()),
+ ]
+ ),
+ ),
+ (b"string_col", pa.string()),
+ ]
+ )
+
+ data = pa.array([], type=arrow_type)
+ ser = Series(data, dtype=ArrowDtype(arrow_type))
+ expected = pc.struct_field(data, indices)
+ result = ser.struct.field(indices)
+ tm.assert_equal(result.array._pa_array.combine_chunks(), expected)
+ assert result.name == name
| This expands the set of types allowed by Series.struct.field to allow those allowed by pyarrow.
Closes https://github.com/pandas-dev/pandas/issues/56065
- [x] closes #56065
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56167 | 2023-11-25T22:20:47Z | 2024-01-02T19:15:33Z | 2024-01-02T19:15:33Z | 2024-01-02T22:55:04Z |
CoW: Fix deprecation warning for chained assignment | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e4e95a973a3c1..64f1341235710 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -101,6 +101,7 @@
SettingWithCopyWarning,
_chained_assignment_method_msg,
_chained_assignment_warning_method_msg,
+ _check_cacher,
)
from pandas.util._decorators import (
deprecate_nonkeyword_arguments,
@@ -7195,7 +7196,7 @@ def fillna(
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
+ if isinstance(self, ABCSeries) and _check_cacher(self):
# see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221
ref_count += 1
if ctr <= ref_count:
@@ -7477,7 +7478,7 @@ def ffill(
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
+ if isinstance(self, ABCSeries) and _check_cacher(self):
# see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221
ref_count += 1
if ctr <= ref_count:
@@ -7660,7 +7661,7 @@ def bfill(
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
+ if isinstance(self, ABCSeries) and _check_cacher(self):
# see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221
ref_count += 1
if ctr <= ref_count:
@@ -7826,12 +7827,12 @@ def replace(
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
+ if isinstance(self, ABCSeries) and _check_cacher(self):
# in non-CoW mode, chained Series access will populate the
# `_item_cache` which results in an increased ref count not below
# the threshold, while we still need to warn. We detect this case
# of a Series derived from a DataFrame through the presence of
- # `_cacher`
+ # checking the `_cacher`
ref_count += 1
if ctr <= ref_count:
warnings.warn(
@@ -8267,7 +8268,7 @@ def interpolate(
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if isinstance(self, ABCSeries) and hasattr(self, "_cacher"):
+ if isinstance(self, ABCSeries) and _check_cacher(self):
# see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221
ref_count += 1
if ctr <= ref_count:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a9679f22f9933..13b3423497b54 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -45,6 +45,7 @@
_chained_assignment_method_msg,
_chained_assignment_msg,
_chained_assignment_warning_method_msg,
+ _check_cacher,
)
from pandas.util._decorators import (
Appender,
@@ -3564,7 +3565,7 @@ def update(self, other: Series | Sequence | Mapping) -> None:
elif not PYPY and not using_copy_on_write():
ctr = sys.getrefcount(self)
ref_count = REF_COUNT
- if hasattr(self, "_cacher"):
+ if _check_cacher(self):
# see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221
ref_count += 1
if ctr <= ref_count:
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index e2aa9010dc109..c89e4aa2cac0f 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -516,6 +516,24 @@ class ChainedAssignmentError(Warning):
)
+def _check_cacher(obj):
+ # This is a mess, selection paths that return a view set the _cacher attribute
+ # on the Series; most of them also set _item_cache which adds 1 to our relevant
+ # reference count, but iloc does not, so we have to check if we are actually
+ # in the item cache
+ if hasattr(obj, "_cacher"):
+ parent = obj._cacher[1]()
+ # parent could be dead
+ if parent is None:
+ return False
+ if hasattr(parent, "_item_cache"):
+ if obj._cacher[0] in parent._item_cache:
+ # Check if we are actually the item from item_cache, iloc creates a
+ # new object
+ return obj is parent._item_cache[obj._cacher[0]]
+ return False
+
+
class NumExprClobberingError(NameError):
"""
Exception raised when trying to use a built-in numexpr name as a variable name.
diff --git a/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
new file mode 100644
index 0000000000000..37431f39bdaa0
--- /dev/null
+++ b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
@@ -0,0 +1,63 @@
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_methods_iloc_warn(using_copy_on_write):
+ if not using_copy_on_write:
+ df = DataFrame({"a": [1, 2, 3], "b": 1})
+ with tm.assert_cow_warning(match="A value"):
+ df.iloc[:, 0].replace(1, 5, inplace=True)
+
+ with tm.assert_cow_warning(match="A value"):
+ df.iloc[:, 0].fillna(1, inplace=True)
+
+ with tm.assert_cow_warning(match="A value"):
+ df.iloc[:, 0].interpolate(inplace=True)
+
+ with tm.assert_cow_warning(match="A value"):
+ df.iloc[:, 0].ffill(inplace=True)
+
+ with tm.assert_cow_warning(match="A value"):
+ df.iloc[:, 0].bfill(inplace=True)
+
+
+@pytest.mark.parametrize(
+ "func, args",
+ [
+ ("replace", (1, 5)),
+ ("fillna", (1,)),
+ ("interpolate", ()),
+ ("bfill", ()),
+ ("ffill", ()),
+ ],
+)
+def test_methods_iloc_getitem_item_cache(func, args, using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": 1})
+ ser = df.iloc[:, 0]
+ TODO(CoW-warn) should warn about updating a view
+ getattr(ser, func)(*args, inplace=True)
+
+ # parent that holds item_cache is dead, so don't increase ref count
+ ser = df.copy()["a"]
+ getattr(ser, func)(*args, inplace=True)
+
+ df = df.copy()
+
+ df["a"] # populate the item_cache
+ ser = df.iloc[:, 0] # iloc creates a new object
+ ser.fillna(0, inplace=True)
+
+ df["a"] # populate the item_cache
+ ser = df["a"]
+ ser.fillna(0, inplace=True)
+
+ df = df.copy()
+ df["a"] # populate the item_cache
+ if using_copy_on_write:
+ with tm.raises_chained_assignment_error():
+ df["a"].fillna(0, inplace=True)
+ else:
+ with tm.assert_cow_warning(match="A value"):
+ df["a"].fillna(0, inplace=True)
diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py
index 5bde4c21cfab5..26f1311e950ef 100755
--- a/scripts/validate_unwanted_patterns.py
+++ b/scripts/validate_unwanted_patterns.py
@@ -51,6 +51,7 @@
"_chained_assignment_msg",
"_chained_assignment_method_msg",
"_chained_assignment_warning_method_msg",
+ "_check_cacher",
"_version_meson",
# The numba extensions need this to mock the iloc object
"_iLocIndexer",
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I hate the SettingWithCopyWarning mechanism...
2 cases were fixed here:
- the parent that holds the item_cache might be dead, e.g. the ref count is not increased. We have to check this when looking at the cacher
- iloc does not populate the item_cache but it sets _cacher on the new object, so we have to check if we are actually part of the item_cache
xref https://github.com/pandas-dev/pandas/issues/56019 | https://api.github.com/repos/pandas-dev/pandas/pulls/56166 | 2023-11-25T19:35:12Z | 2023-11-27T14:07:48Z | 2023-11-27T14:07:48Z | 2023-11-27T14:08:12Z |
TST: parametrize over dt64 unit | diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index a63bfbf1835a9..9014ba4b6093e 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -905,7 +905,7 @@ def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
- expected = DatetimeIndex(["NaT"] * 9, tz=tz)
+ expected = DatetimeIndex(["NaT"] * 9, tz=tz).as_unit("ns")
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
@@ -1590,13 +1590,13 @@ class TestDatetime64OverflowHandling:
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
- left = Series([Timestamp("1969-12-31")])
+ left = Series([Timestamp("1969-12-31")], dtype="M8[ns]")
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
- expected = TimedeltaIndex([NaT])
+ expected = TimedeltaIndex([NaT], dtype="m8[ns]")
expected = tm.box_expected(expected, box_with_array)
result = left - right
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index bc632209ff7e1..0130820fc3de6 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -1354,7 +1354,8 @@ def test_setitem_frame_dup_cols_dtype(self):
def test_frame_setitem_empty_dataframe(self):
# GH#28871
- df = DataFrame({"date": [datetime(2000, 1, 1)]}).set_index("date")
+ dti = DatetimeIndex(["2000-01-01"], dtype="M8[ns]", name="date")
+ df = DataFrame({"date": dti}).set_index("date")
df = df[0:0].copy()
df["3010"] = None
@@ -1363,6 +1364,6 @@ def test_frame_setitem_empty_dataframe(self):
expected = DataFrame(
[],
columns=["3010", "2010"],
- index=Index([], dtype="datetime64[ns]", name="date"),
+ index=dti[:0],
)
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py
index dcec68ab3530d..787b77a5c725a 100644
--- a/pandas/tests/frame/methods/test_quantile.py
+++ b/pandas/tests/frame/methods/test_quantile.py
@@ -623,23 +623,23 @@ def test_quantile_nan(self, interp_method, request, using_array_manager):
exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
- def test_quantile_nat(self, interp_method, request, using_array_manager):
+ def test_quantile_nat(self, interp_method, request, using_array_manager, unit):
interpolation, method = interp_method
if method == "table" and using_array_manager:
request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set."))
# full NaT column
- df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]})
+ df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]}, dtype=f"M8[{unit}]")
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
- exp = Series([pd.NaT], index=["a"], name=0.5)
+ exp = Series([pd.NaT], index=["a"], name=0.5, dtype=f"M8[{unit}]")
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5], numeric_only=False, interpolation=interpolation, method=method
)
- exp = DataFrame({"a": [pd.NaT]}, index=[0.5])
+ exp = DataFrame({"a": [pd.NaT]}, index=[0.5], dtype=f"M8[{unit}]")
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
@@ -651,20 +651,29 @@ def test_quantile_nat(self, interp_method, request, using_array_manager):
Timestamp("2012-01-03"),
],
"b": [pd.NaT, pd.NaT, pd.NaT],
- }
+ },
+ dtype=f"M8[{unit}]",
)
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
- exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5)
+ exp = Series(
+ [Timestamp("2012-01-02"), pd.NaT],
+ index=["a", "b"],
+ name=0.5,
+ dtype=f"M8[{unit}]",
+ )
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5], numeric_only=False, interpolation=interpolation, method=method
)
exp = DataFrame(
- [[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"]
+ [[Timestamp("2012-01-02"), pd.NaT]],
+ index=[0.5],
+ columns=["a", "b"],
+ dtype=f"M8[{unit}]",
)
tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 20ad93e6dce4d..0d71fb0926df9 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -598,7 +598,7 @@ def test_sem(self, datetime_frame):
"C": [1.0],
"D": ["a"],
"E": Categorical(["a"], categories=["a"]),
- "F": to_datetime(["2000-1-2"]),
+ "F": pd.DatetimeIndex(["2000-01-02"], dtype="M8[ns]"),
"G": to_timedelta(["1 days"]),
},
),
@@ -610,7 +610,7 @@ def test_sem(self, datetime_frame):
"C": [np.nan],
"D": np.array([np.nan], dtype=object),
"E": Categorical([np.nan], categories=["a"]),
- "F": [pd.NaT],
+ "F": pd.DatetimeIndex([pd.NaT], dtype="M8[ns]"),
"G": to_timedelta([pd.NaT]),
},
),
@@ -621,7 +621,9 @@ def test_sem(self, datetime_frame):
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
- "L": to_datetime(["2000-1-2", "NaT", "NaT", "NaT"]),
+ "L": pd.DatetimeIndex(
+ ["2000-01-02", "NaT", "NaT", "NaT"], dtype="M8[ns]"
+ ),
"M": to_timedelta(["1 days", "nan", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
@@ -633,7 +635,9 @@ def test_sem(self, datetime_frame):
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
- "L": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
+ "L": pd.DatetimeIndex(
+ ["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
+ ),
"M": to_timedelta(["nan", "1 days", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
@@ -648,13 +652,17 @@ def test_mode_dropna(self, dropna, expected):
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, "a", np.nan],
"E": Categorical([np.nan, np.nan, "a", np.nan]),
- "F": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
+ "F": pd.DatetimeIndex(
+ ["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
+ ),
"G": to_timedelta(["1 days", "nan", "nan", "nan"]),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(["a", np.nan, "a", np.nan]),
- "L": to_datetime(["2000-1-2", "2000-1-2", "NaT", "NaT"]),
+ "L": pd.DatetimeIndex(
+ ["2000-01-02", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
+ ),
"M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
"N": np.arange(4, dtype="int64"),
}
diff --git a/pandas/tests/groupby/methods/test_groupby_shift_diff.py b/pandas/tests/groupby/methods/test_groupby_shift_diff.py
index f2d40867af03a..0ce6a6462a5d8 100644
--- a/pandas/tests/groupby/methods/test_groupby_shift_diff.py
+++ b/pandas/tests/groupby/methods/test_groupby_shift_diff.py
@@ -117,10 +117,11 @@ def test_group_diff_real_frame(any_real_numpy_dtype):
[Timedelta("5 days"), Timedelta("6 days"), Timedelta("7 days")],
],
)
-def test_group_diff_datetimelike(data):
+def test_group_diff_datetimelike(data, unit):
df = DataFrame({"a": [1, 2, 2], "b": data})
+ df["b"] = df["b"].dt.as_unit(unit)
result = df.groupby("a")["b"].diff()
- expected = Series([NaT, NaT, Timedelta("1 days")], name="b")
+ expected = Series([NaT, NaT, Timedelta("1 days")], name="b").dt.as_unit(unit)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index be02c7f79ba01..d1faab9cabfba 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -98,11 +98,17 @@ def test_groupby_with_timegrouper(self):
for df in [df_original, df_reordered]:
df = df.set_index(["Date"])
+ exp_dti = date_range(
+ "20130901",
+ "20131205",
+ freq="5D",
+ name="Date",
+ inclusive="left",
+ unit=df.index.unit,
+ )
expected = DataFrame(
{"Buyer": 0, "Quantity": 0},
- index=date_range(
- "20130901", "20131205", freq="5D", name="Date", inclusive="left"
- ),
+ index=exp_dti,
)
# Cast to object to avoid implicit cast when setting entry to "CarlCarlCarl"
expected = expected.astype({"Buyer": object})
@@ -514,6 +520,7 @@ def test_groupby_groups_datetimeindex(self):
groups = grouped.groups
assert isinstance(next(iter(groups.keys())), datetime)
+ def test_groupby_groups_datetimeindex2(self):
# GH#11442
index = date_range("2015/01/01", periods=5, name="date")
df = DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index)
@@ -876,7 +883,9 @@ def test_groupby_apply_timegrouper_with_nat_dict_returns(
res = gb["Quantity"].apply(lambda x: {"foo": len(x)})
- dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date")
+ df = gb.obj
+ unit = df["Date"]._values.unit
+ dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)
mi = MultiIndex.from_arrays([dti, ["foo"] * len(dti)])
expected = Series([3, 0, 0, 0, 0, 0, 2], index=mi, name="Quantity")
tm.assert_series_equal(res, expected)
@@ -890,7 +899,9 @@ def test_groupby_apply_timegrouper_with_nat_scalar_returns(
res = gb["Quantity"].apply(lambda x: x.iloc[0] if len(x) else np.nan)
- dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date")
+ df = gb.obj
+ unit = df["Date"]._values.unit
+ dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)
expected = Series(
[18, np.nan, np.nan, np.nan, np.nan, np.nan, 5],
index=dti._with_freq(None),
@@ -919,9 +930,10 @@ def test_groupby_apply_timegrouper_with_nat_apply_squeeze(
with tm.assert_produces_warning(FutureWarning, match=msg):
res = gb.apply(lambda x: x["Quantity"] * 2)
+ dti = Index([Timestamp("2013-12-31")], dtype=df["Date"].dtype, name="Date")
expected = DataFrame(
[[36, 6, 6, 10, 2]],
- index=Index([Timestamp("2013-12-31")], name="Date"),
+ index=dti,
columns=Index([0, 1, 5, 2, 3], name="Quantity"),
)
tm.assert_frame_equal(res, expected)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index cf122832d86b4..a6f160d92fb66 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -115,12 +115,15 @@ def test_transform_fast2():
)
result = df.groupby("grouping").transform("first")
- dates = [
- Timestamp("2014-1-1"),
- Timestamp("2014-1-2"),
- Timestamp("2014-1-2"),
- Timestamp("2014-1-4"),
- ]
+ dates = pd.Index(
+ [
+ Timestamp("2014-1-1"),
+ Timestamp("2014-1-2"),
+ Timestamp("2014-1-2"),
+ Timestamp("2014-1-4"),
+ ],
+ dtype="M8[ns]",
+ )
expected = DataFrame(
{"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]},
columns=["f", "i", "d"],
@@ -532,7 +535,7 @@ def test_series_fast_transform_date():
Timestamp("2014-1-2"),
Timestamp("2014-1-4"),
]
- expected = Series(dates, name="d")
+ expected = Series(dates, name="d", dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@@ -1204,7 +1207,9 @@ def test_groupby_transform_with_datetimes(func, values):
result = stocks.groupby(stocks["week_id"])["price"].transform(func)
- expected = Series(data=pd.to_datetime(values), index=dates, name="price")
+ expected = Series(
+ data=pd.to_datetime(values).as_unit("ns"), index=dates, name="price"
+ )
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/methods/test_astype.py b/pandas/tests/indexes/datetimes/methods/test_astype.py
index 9ee6250feeac6..c0bc6601769b1 100644
--- a/pandas/tests/indexes/datetimes/methods/test_astype.py
+++ b/pandas/tests/indexes/datetimes/methods/test_astype.py
@@ -292,7 +292,7 @@ def test_integer_index_astype_datetime(self, tz, dtype):
# GH 20997, 20964, 24559
val = [Timestamp("2018-01-01", tz=tz).as_unit("ns")._value]
result = Index(val, name="idx").astype(dtype)
- expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx")
+ expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx").as_unit("ns")
tm.assert_index_equal(result, expected)
def test_dti_astype_period(self):
@@ -312,8 +312,9 @@ class TestAstype:
def test_astype_category(self, tz):
obj = date_range("2000", periods=2, tz=tz, name="idx")
result = obj.astype("category")
+ dti = DatetimeIndex(["2000-01-01", "2000-01-02"], tz=tz).as_unit("ns")
expected = pd.CategoricalIndex(
- [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)],
+ dti,
name="idx",
)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 73de33607ca0b..b7932715c3ac7 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -35,46 +35,43 @@ def test_getitem_slice_keeps_name(self):
dr = date_range(st, et, freq="h", name="timebucket")
assert dr[1:].name == dr.name
- def test_getitem(self):
- idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
- idx2 = date_range(
- "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
- )
+ @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
+ def test_getitem(self, tz):
+ idx = date_range("2011-01-01", "2011-01-31", freq="D", tz=tz, name="idx")
- for idx in [idx1, idx2]:
- result = idx[0]
- assert result == Timestamp("2011-01-01", tz=idx.tz)
+ result = idx[0]
+ assert result == Timestamp("2011-01-01", tz=idx.tz)
- result = idx[0:5]
- expected = date_range(
- "2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx[0:5]
+ expected = date_range(
+ "2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- result = idx[0:10:2]
- expected = date_range(
- "2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx[0:10:2]
+ expected = date_range(
+ "2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- result = idx[-20:-5:3]
- expected = date_range(
- "2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx[-20:-5:3]
+ expected = date_range(
+ "2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
- result = idx[4::-1]
- expected = DatetimeIndex(
- ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
- freq="-1D",
- tz=idx.tz,
- name="idx",
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
+ result = idx[4::-1]
+ expected = DatetimeIndex(
+ ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
+ dtype=idx.dtype,
+ freq="-1D",
+ name="idx",
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
@pytest.mark.parametrize("freq", ["B", "C"])
def test_dti_business_getitem(self, freq):
@@ -264,8 +261,8 @@ def test_take(self):
result = idx.take([3, 2, 5])
expected = DatetimeIndex(
["2011-01-04", "2011-01-03", "2011-01-06"],
+ dtype=idx.dtype,
freq=None,
- tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
@@ -274,6 +271,7 @@ def test_take(self):
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(
["2011-01-29", "2011-01-03", "2011-01-06"],
+ dtype=idx.dtype,
freq=None,
tz=idx.tz,
name="idx",
@@ -314,7 +312,7 @@ def test_take2(self, tz):
tz=tz,
name="idx",
)
- expected = DatetimeIndex(dates, freq=None, name="idx", tz=tz)
+ expected = DatetimeIndex(dates, freq=None, name="idx", dtype=idx.dtype)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index dde680665a8bc..353026e81b390 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -249,19 +249,23 @@ def test_intersection(self, tz, sort):
# non-monotonic
base = DatetimeIndex(
["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"], tz=tz, name="idx"
- )
+ ).as_unit("ns")
rng2 = DatetimeIndex(
["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], tz=tz, name="idx"
- )
- expected2 = DatetimeIndex(["2011-01-04", "2011-01-02"], tz=tz, name="idx")
+ ).as_unit("ns")
+ expected2 = DatetimeIndex(
+ ["2011-01-04", "2011-01-02"], tz=tz, name="idx"
+ ).as_unit("ns")
rng3 = DatetimeIndex(
["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"],
tz=tz,
name="other",
- )
- expected3 = DatetimeIndex(["2011-01-04", "2011-01-02"], tz=tz, name=None)
+ ).as_unit("ns")
+ expected3 = DatetimeIndex(
+ ["2011-01-04", "2011-01-02"], tz=tz, name=None
+ ).as_unit("ns")
# GH 7880
rng4 = date_range("7/1/2000", "7/31/2000", freq="D", tz=tz, name="idx")
@@ -350,7 +354,7 @@ def test_difference_freq(self, sort):
index = date_range("20160920", "20160925", freq="D")
other = date_range("20160921", "20160924", freq="D")
- expected = DatetimeIndex(["20160920", "20160925"], freq=None)
+ expected = DatetimeIndex(["20160920", "20160925"], dtype="M8[ns]", freq=None)
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
@@ -359,7 +363,7 @@ def test_difference_freq(self, sort):
# subset of the original range
other = date_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other, sort)
- expected = DatetimeIndex(["20160920", "20160921"], freq="D")
+ expected = DatetimeIndex(["20160920", "20160921"], dtype="M8[ns]", freq="D")
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 379c727f4ed0f..daa5b346eb4ec 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -92,7 +92,7 @@ def test_drop_dst_boundary(self):
"201710290245",
"201710290300",
],
- tz=tz,
+ dtype="M8[ns, Europe/Brussels]",
freq=freq,
ambiguous=[
True,
@@ -112,10 +112,14 @@ def test_drop_dst_boundary(self):
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
- def test_date_range_localize(self):
- rng = date_range("3/11/2012 03:00", periods=15, freq="h", tz="US/Eastern")
- rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
- rng3 = date_range("3/11/2012 03:00", periods=15, freq="h")
+ def test_date_range_localize(self, unit):
+ rng = date_range(
+ "3/11/2012 03:00", periods=15, freq="h", tz="US/Eastern", unit=unit
+ )
+ rng2 = DatetimeIndex(
+ ["3/11/2012 03:00", "3/11/2012 04:00"], dtype=f"M8[{unit}, US/Eastern]"
+ )
+ rng3 = date_range("3/11/2012 03:00", periods=15, freq="h", unit=unit)
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng._with_freq(None), rng3)
@@ -129,10 +133,15 @@ def test_date_range_localize(self):
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
+ def test_date_range_localize2(self, unit):
# Right before the DST transition
- rng = date_range("3/11/2012 00:00", periods=2, freq="h", tz="US/Eastern")
+ rng = date_range(
+ "3/11/2012 00:00", periods=2, freq="h", tz="US/Eastern", unit=unit
+ )
rng2 = DatetimeIndex(
- ["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="h"
+ ["3/11/2012 00:00", "3/11/2012 01:00"],
+ dtype=f"M8[{unit}, US/Eastern]",
+ freq="h",
)
tm.assert_index_equal(rng, rng2)
exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
@@ -142,7 +151,9 @@ def test_date_range_localize(self):
assert exp.hour == 1
assert rng[1] == exp
- rng = date_range("3/11/2012 00:00", periods=10, freq="h", tz="US/Eastern")
+ rng = date_range(
+ "3/11/2012 00:00", periods=10, freq="h", tz="US/Eastern", unit=unit
+ )
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
@@ -231,10 +242,10 @@ def test_dti_convert_tz_aware_datetime_datetime(self, tz):
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]
dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates]
- result = DatetimeIndex(dates_aware)
+ result = DatetimeIndex(dates_aware).as_unit("ns")
assert timezones.tz_compare(result.tz, tz)
- converted = to_datetime(dates_aware, utc=True)
+ converted = to_datetime(dates_aware, utc=True).as_unit("ns")
ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is timezone.utc
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 2007a793843c9..fd03047b2c127 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -363,15 +363,18 @@ def test_get_indexer_categorical_with_nans(self):
def test_get_indexer_datetime(self):
ii = IntervalIndex.from_breaks(date_range("2018-01-01", periods=4))
- result = ii.get_indexer(DatetimeIndex(["2018-01-02"]))
+ # TODO: with mismatched resolution get_indexer currently raises;
+ # this should probably coerce?
+ target = DatetimeIndex(["2018-01-02"], dtype="M8[ns]")
+ result = ii.get_indexer(target)
expected = np.array([0], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
- result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).astype(str))
+ result = ii.get_indexer(target.astype(str))
tm.assert_numpy_array_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/47772
- result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).asi8)
+ result = ii.get_indexer(target.asi8)
expected = np.array([-1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index 37606bda9efca..d4d4a09c44d13 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -184,6 +184,9 @@ def test_no_invalid_float_truncation(self, start, end, freq):
def test_linspace_dst_transition(self, start, mid, end):
# GH 20976: linspace behavior defined from start/end/periods
# accounts for the hour gained/lost during DST transition
+ start = start.as_unit("ns")
+ mid = mid.as_unit("ns")
+ end = end.as_unit("ns")
result = interval_range(start=start, end=end, periods=2)
expected = IntervalIndex.from_breaks([start, mid, end])
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/period/methods/test_to_timestamp.py b/pandas/tests/indexes/period/methods/test_to_timestamp.py
index 7be2602135578..3867f9e3245dc 100644
--- a/pandas/tests/indexes/period/methods/test_to_timestamp.py
+++ b/pandas/tests/indexes/period/methods/test_to_timestamp.py
@@ -58,7 +58,9 @@ def test_to_timestamp_pi_nat(self):
result = index.to_timestamp("D")
expected = DatetimeIndex(
- [NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)], name="idx"
+ [NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)],
+ dtype="M8[ns]",
+ name="idx",
)
tm.assert_index_equal(result, expected)
assert result.name == "idx"
@@ -98,11 +100,15 @@ def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="2M", name="idx")
result = idx.to_timestamp()
- expected = DatetimeIndex(["2011-01-01", "NaT", "2011-02-01"], name="idx")
+ expected = DatetimeIndex(
+ ["2011-01-01", "NaT", "2011-02-01"], dtype="M8[ns]", name="idx"
+ )
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E")
- expected = DatetimeIndex(["2011-02-28", "NaT", "2011-03-31"], name="idx")
+ expected = DatetimeIndex(
+ ["2011-02-28", "NaT", "2011-03-31"], dtype="M8[ns]", name="idx"
+ )
expected = expected + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
@@ -110,18 +116,22 @@ def test_to_timestamp_pi_combined(self):
idx = period_range(start="2011", periods=2, freq="1D1h", name="idx")
result = idx.to_timestamp()
- expected = DatetimeIndex(["2011-01-01 00:00", "2011-01-02 01:00"], name="idx")
+ expected = DatetimeIndex(
+ ["2011-01-01 00:00", "2011-01-02 01:00"], dtype="M8[ns]", name="idx"
+ )
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E")
expected = DatetimeIndex(
- ["2011-01-02 00:59:59", "2011-01-03 01:59:59"], name="idx"
+ ["2011-01-02 00:59:59", "2011-01-03 01:59:59"], name="idx", dtype="M8[ns]"
)
expected = expected + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E", freq="h")
- expected = DatetimeIndex(["2011-01-02 00:00", "2011-01-03 01:00"], name="idx")
+ expected = DatetimeIndex(
+ ["2011-01-02 00:00", "2011-01-03 01:00"], dtype="M8[ns]", name="idx"
+ )
expected = expected + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 9cf11b4602cb2..7be3d8c657766 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -5,9 +5,9 @@
from pandas import (
DataFrame,
+ DatetimeIndex,
MultiIndex,
date_range,
- to_datetime,
)
import pandas._testing as tm
@@ -219,7 +219,11 @@ def test_setitem_multiple_partial(self, multiindex_dataframe_random_data):
@pytest.mark.parametrize(
"indexer, exp_idx, exp_values",
[
- (slice("2019-2", None), [to_datetime("2019-02-01")], [2, 3]),
+ (
+ slice("2019-2", None),
+ DatetimeIndex(["2019-02-01"], dtype="M8[ns]"),
+ [2, 3],
+ ),
(
slice(None, "2019-2"),
date_range("2019", periods=2, freq="MS"),
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index e31b675efb69a..9870868a3e1e9 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -9,7 +9,6 @@
DataFrame,
MultiIndex,
Series,
- Timestamp,
date_range,
isna,
notna,
@@ -91,11 +90,11 @@ def test_setitem_multiindex3(self):
np.random.default_rng(2).random((12, 4)), index=idx, columns=cols
)
- subidx = MultiIndex.from_tuples(
- [("A", Timestamp("2015-01-01")), ("A", Timestamp("2015-02-01"))]
+ subidx = MultiIndex.from_arrays(
+ [["A", "A"], date_range("2015-01-01", "2015-02-01", freq="MS")]
)
- subcols = MultiIndex.from_tuples(
- [("foo", Timestamp("2016-01-01")), ("foo", Timestamp("2016-02-01"))]
+ subcols = MultiIndex.from_arrays(
+ [["foo", "foo"], date_range("2016-01-01", "2016-02-01", freq="MS")]
)
vals = DataFrame(
diff --git a/pandas/tests/io/xml/test_xml_dtypes.py b/pandas/tests/io/xml/test_xml_dtypes.py
index fb24902efc0f5..a85576ff13f5c 100644
--- a/pandas/tests/io/xml/test_xml_dtypes.py
+++ b/pandas/tests/io/xml/test_xml_dtypes.py
@@ -9,6 +9,7 @@
from pandas import (
DataFrame,
+ DatetimeIndex,
Series,
to_datetime,
)
@@ -146,7 +147,9 @@ def test_dtypes_with_names(parser):
"Col1": ["square", "circle", "triangle"],
"Col2": Series(["00360", "00360", "00180"]).astype("string"),
"Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"),
- "Col4": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
+ "Col4": DatetimeIndex(
+ ["2020-01-01", "2021-01-01", "2022-01-01"], dtype="M8[ns]"
+ ),
}
)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 4afd3b477c3ee..3cf5201d573d4 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -118,12 +118,11 @@ def test_getitem_multiple():
df = DataFrame(data, index=date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
+
+ exp_mi = pd.MultiIndex.from_arrays([[1, 2], df.index], names=("id", None))
expected = Series(
[1, 1],
- index=pd.MultiIndex.from_tuples(
- [(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
- names=["id", None],
- ),
+ index=exp_mi,
name="buyer",
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index a38e4ffe2eaf7..c4c83e2046b76 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -787,7 +787,7 @@ def test_join_datetime_string(self):
index=[2, 4],
columns=["x", "y", "z", "a"],
)
- expected["x"] = expected["x"].dt.as_unit("ns")
+ expected["x"] = expected["x"].astype("M8[ns]")
tm.assert_frame_equal(result, expected)
def test_join_with_categorical_index(self):
diff --git a/pandas/tests/series/methods/test_quantile.py b/pandas/tests/series/methods/test_quantile.py
index 016635a50fdf4..fa0563271d7df 100644
--- a/pandas/tests/series/methods/test_quantile.py
+++ b/pandas/tests/series/methods/test_quantile.py
@@ -105,8 +105,8 @@ def test_quantile_interpolation_dtype(self):
def test_quantile_nan(self):
# GH 13098
- s = Series([1, 2, 3, 4, np.nan])
- result = s.quantile(0.5)
+ ser = Series([1, 2, 3, 4, np.nan])
+ result = ser.quantile(0.5)
expected = 2.5
assert result == expected
@@ -114,14 +114,14 @@ def test_quantile_nan(self):
s1 = Series([], dtype=object)
cases = [s1, Series([np.nan, np.nan])]
- for s in cases:
- res = s.quantile(0.5)
+ for ser in cases:
+ res = ser.quantile(0.5)
assert np.isnan(res)
- res = s.quantile([0.5])
+ res = ser.quantile([0.5])
tm.assert_series_equal(res, Series([np.nan], index=[0.5]))
- res = s.quantile([0.2, 0.3])
+ res = ser.quantile([0.2, 0.3])
tm.assert_series_equal(res, Series([np.nan, np.nan], index=[0.2, 0.3]))
@pytest.mark.parametrize(
@@ -160,11 +160,11 @@ def test_quantile_nan(self):
],
)
def test_quantile_box(self, case):
- s = Series(case, name="XXX")
- res = s.quantile(0.5)
+ ser = Series(case, name="XXX")
+ res = ser.quantile(0.5)
assert res == case[1]
- res = s.quantile([0.5])
+ res = ser.quantile([0.5])
exp = Series([case[1]], index=[0.5], name="XXX")
tm.assert_series_equal(res, exp)
@@ -190,35 +190,37 @@ def test_quantile_sparse(self, values, dtype):
expected = Series(np.asarray(ser)).quantile([0.5]).astype("Sparse[float]")
tm.assert_series_equal(result, expected)
- def test_quantile_empty(self):
+ def test_quantile_empty_float64(self):
# floats
- s = Series([], dtype="float64")
+ ser = Series([], dtype="float64")
- res = s.quantile(0.5)
+ res = ser.quantile(0.5)
assert np.isnan(res)
- res = s.quantile([0.5])
+ res = ser.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
+ def test_quantile_empty_int64(self):
# int
- s = Series([], dtype="int64")
+ ser = Series([], dtype="int64")
- res = s.quantile(0.5)
+ res = ser.quantile(0.5)
assert np.isnan(res)
- res = s.quantile([0.5])
+ res = ser.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
+ def test_quantile_empty_dt64(self):
# datetime
- s = Series([], dtype="datetime64[ns]")
+ ser = Series([], dtype="datetime64[ns]")
- res = s.quantile(0.5)
+ res = ser.quantile(0.5)
assert res is pd.NaT
- res = s.quantile([0.5])
- exp = Series([pd.NaT], index=[0.5])
+ res = ser.quantile([0.5])
+ exp = Series([pd.NaT], index=[0.5], dtype=ser.dtype)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("dtype", [int, float, "Int64"])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56165 | 2023-11-25T15:35:12Z | 2023-11-25T21:05:02Z | 2023-11-25T21:05:02Z | 2023-11-25T23:30:13Z |
TST/CLN: Remove make_rand_series | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 3a3c98f253fcc..a74fb2bf48bc4 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -456,23 +456,6 @@ def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
return pi
-# make series
-def make_rand_series(name=None, dtype=np.float64) -> Series:
- index = makeStringIndex(_N)
- data = np.random.default_rng(2).standard_normal(_N)
- with np.errstate(invalid="ignore"):
- data = data.astype(dtype, copy=False)
- return Series(data, index=index, name=name)
-
-
-def makeFloatSeries(name=None) -> Series:
- return make_rand_series(name=name)
-
-
-def makeStringSeries(name=None) -> Series:
- return make_rand_series(name=name)
-
-
def makeObjectSeries(name=None) -> Series:
data = makeStringIndex(_N)
data = Index(data, dtype=object)
@@ -1073,16 +1056,13 @@ def shares_memory(left, right) -> bool:
"makeDataFrame",
"makeDateIndex",
"makeFloatIndex",
- "makeFloatSeries",
"makeIntIndex",
"makeMixedDataFrame",
"makeNumericIndex",
"makeObjectSeries",
"makePeriodIndex",
- "make_rand_series",
"makeRangeIndex",
"makeStringIndex",
- "makeStringSeries",
"makeTimeDataFrame",
"makeTimedeltaIndex",
"makeTimeSeries",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 4faf7faa6aa5d..350871c3085c1 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -729,9 +729,11 @@ def string_series() -> Series:
"""
Fixture for Series of floats with Index of unique strings
"""
- s = tm.makeStringSeries()
- s.name = "series"
- return s
+ return Series(
+ np.arange(30, dtype=np.float64) * 1.1,
+ index=Index([f"i_{i}" for i in range(30)], dtype=object),
+ name="series",
+ )
@pytest.fixture
@@ -776,7 +778,9 @@ def series_with_simple_index(index) -> Series:
_narrow_series = {
- f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype)
+ f"{dtype.__name__}-series": Series(
+ range(30), index=[f"i-{i}" for i in range(30)], name="a", dtype=dtype
+ )
for dtype in tm.NARROW_NP_DTYPES
}
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 9f5157181843e..9f8611dd4b08b 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -338,11 +338,8 @@ def test_transform_wont_agg_series(string_series, func):
# we are trying to transform with an aggregator
msg = "Function did not transform"
- warn = RuntimeWarning if func[0] == "sqrt" else None
- warn_msg = "invalid value encountered in sqrt"
with pytest.raises(ValueError, match=msg):
- with tm.assert_produces_warning(warn, match=warn_msg, check_stacklevel=False):
- string_series.transform(func)
+ string_series.transform(func)
@pytest.mark.parametrize(
diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py
index 15daca86b14ee..65e234e799353 100644
--- a/pandas/tests/base/test_misc.py
+++ b/pandas/tests/base/test_misc.py
@@ -132,7 +132,7 @@ def test_memory_usage_components_series(series_with_simple_index):
@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES)
def test_memory_usage_components_narrow_series(dtype):
- series = tm.make_rand_series(name="a", dtype=dtype)
+ series = Series(range(5), dtype=dtype, index=[f"i-{i}" for i in range(5)], name="a")
total_usage = series.memory_usage(index=True)
non_index_usage = series.memory_usage(index=False)
index_usage = series.index.memory_usage()
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index d008249db1a3f..88cec50c08aba 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -78,8 +78,6 @@ def test_notna_notnull(notna_f):
@pytest.mark.parametrize(
"ser",
[
- tm.makeFloatSeries(),
- tm.makeStringSeries(),
tm.makeObjectSeries(),
tm.makeTimeSeries(),
Series(range(5), period_range("2020-01-01", periods=5)),
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 87beab04bc586..1f08b9d5c35b8 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -316,7 +316,11 @@ class TestNDFrame:
# tests that don't fit elsewhere
@pytest.mark.parametrize(
- "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]
+ "ser",
+ [
+ Series(range(10), dtype=np.float64),
+ Series([str(i) for i in range(10)], dtype=object),
+ ],
)
def test_squeeze_series_noop(self, ser):
# noop
@@ -360,14 +364,18 @@ def test_squeeze_axis_len_3(self):
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
- s = tm.makeFloatSeries()
+ s = Series(range(2), dtype=np.float64)
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
@pytest.mark.parametrize(
- "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]
+ "ser",
+ [
+ Series(range(10), dtype=np.float64),
+ Series([str(i) for i in range(10)], dtype=object),
+ ],
)
def test_transpose_series(self, ser):
# calls implementation in pandas/core/base.py
@@ -393,7 +401,11 @@ def test_numpy_transpose(self, frame_or_series):
np.transpose(obj, axes=1)
@pytest.mark.parametrize(
- "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]
+ "ser",
+ [
+ Series(range(10), dtype=np.float64),
+ Series([str(i) for i in range(10)], dtype=object),
+ ],
)
def test_take_series(self, ser):
indices = [1, 5, -2, 6, 3, -1]
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index dace8435595ee..50cf7d737eb99 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -99,7 +99,7 @@ def test_append(setup_path):
def test_append_series(setup_path):
with ensure_clean_store(setup_path) as store:
# basic
- ss = tm.makeStringSeries()
+ ss = Series(range(20), dtype=np.float64, index=[f"i_{i}" for i in range(20)])
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
diff --git a/pandas/tests/io/pytables/test_keys.py b/pandas/tests/io/pytables/test_keys.py
index 0dcc9f7f1b9c2..fd7df29595090 100644
--- a/pandas/tests/io/pytables/test_keys.py
+++ b/pandas/tests/io/pytables/test_keys.py
@@ -3,6 +3,7 @@
from pandas import (
DataFrame,
HDFStore,
+ Series,
_testing as tm,
)
from pandas.tests.io.pytables.common import (
@@ -16,7 +17,9 @@
def test_keys(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
- store["b"] = tm.makeStringSeries()
+ store["b"] = Series(
+ range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
+ )
store["c"] = tm.makeDataFrame()
assert len(store) == 3
diff --git a/pandas/tests/io/pytables/test_read.py b/pandas/tests/io/pytables/test_read.py
index 32af61de05ee4..2030b1eca3203 100644
--- a/pandas/tests/io/pytables/test_read.py
+++ b/pandas/tests/io/pytables/test_read.py
@@ -356,7 +356,7 @@ def test_read_hdf_series_mode_r(tmp_path, format, setup_path):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
- series = tm.makeFloatSeries()
+ series = Series(range(10), dtype=np.float64)
path = tmp_path / setup_path
series.to_hdf(path, key="data", format=format)
result = read_hdf(path, key="data", mode="r")
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 4f908f28cb5e9..6c24843f18d0d 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -36,7 +36,7 @@ def roundtrip(key, obj, **kwargs):
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
- o = tm.makeStringSeries()
+ o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
@@ -249,7 +249,7 @@ def test_table_values_dtypes_roundtrip(setup_path):
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_series(setup_path):
- s = tm.makeStringSeries()
+ s = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 4624a48df18e3..96c160ab40bd8 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -103,7 +103,9 @@ def test_repr(setup_path):
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
- store["b"] = tm.makeStringSeries()
+ store["b"] = Series(
+ range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
+ )
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 7d543d29c034d..c8b47666e1b4a 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -43,7 +43,9 @@ def ts():
@pytest.fixture
def series():
- return tm.makeStringSeries(name="series")
+ return Series(
+ range(20), dtype=np.float64, name="series", index=[f"i_{i}" for i in range(20)]
+ )
class TestSeriesPlots:
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 5a6d1af6257bb..9c4ae92224148 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -868,7 +868,7 @@ def test_idxmin_dt64index(self, unit):
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
# add some NaNs
string_series[5:15] = np.nan
@@ -901,7 +901,7 @@ def test_idxmin(self):
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
# add some NaNs
string_series[5:15] = np.nan
diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py
index 74e521ab71f41..81f560caff3fa 100644
--- a/pandas/tests/reductions/test_stat_reductions.py
+++ b/pandas/tests/reductions/test_stat_reductions.py
@@ -154,15 +154,15 @@ def _check_stat_op(
f(string_series_, numeric_only=True)
def test_sum(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("sum", np.sum, string_series, check_allna=False)
def test_mean(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("mean", np.mean, string_series)
def test_median(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("median", np.median, string_series)
# test with integers, test failure
@@ -170,19 +170,19 @@ def test_median(self):
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_prod(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("prod", np.prod, string_series)
def test_min(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("min", np.min, string_series, check_objects=True)
def test_max(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
self._check_stat_op("max", np.max, string_series, check_objects=True)
def test_var_std(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
datetime_series = tm.makeTimeSeries().rename("ts")
alt = lambda x: np.std(x, ddof=1)
@@ -208,7 +208,7 @@ def test_var_std(self):
assert pd.isna(result)
def test_sem(self):
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
datetime_series = tm.makeTimeSeries().rename("ts")
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
@@ -228,7 +228,7 @@ def test_sem(self):
def test_skew(self):
sp_stats = pytest.importorskip("scipy.stats")
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
alt = lambda x: sp_stats.skew(x, bias=False)
self._check_stat_op("skew", alt, string_series)
@@ -250,7 +250,7 @@ def test_skew(self):
def test_kurt(self):
sp_stats = pytest.importorskip("scipy.stats")
- string_series = tm.makeStringSeries().rename("series")
+ string_series = Series(range(20), dtype=np.float64, name="series")
alt = lambda x: sp_stats.kurtosis(x, bias=False)
self._check_stat_op("kurt", alt, string_series)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 6471cd71f0860..f38a29bfe7e88 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -47,7 +47,11 @@ class TestSeriesFlexArithmetic:
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
- (lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
+ (
+ lambda x: Series(range(10), dtype=np.float64),
+ lambda x: Series(range(10), dtype=np.float64),
+ True,
+ ),
],
)
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/test_unary.py b/pandas/tests/series/test_unary.py
index ad0e344fa4420..8f153788e413c 100644
--- a/pandas/tests/series/test_unary.py
+++ b/pandas/tests/series/test_unary.py
@@ -8,13 +8,11 @@ class TestSeriesUnaryOps:
# __neg__, __pos__, __invert__
def test_neg(self):
- ser = tm.makeStringSeries()
- ser.name = "series"
+ ser = Series(range(5), dtype="float64", name="series")
tm.assert_series_equal(-ser, -1 * ser)
def test_invert(self):
- ser = tm.makeStringSeries()
- ser.name = "series"
+ ser = Series(range(5), dtype="float64", name="series")
tm.assert_series_equal(-(ser < 0), ~(ser < 0))
@pytest.mark.parametrize(
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56163 | 2023-11-25T02:13:44Z | 2023-11-26T18:35:26Z | 2023-11-26T18:35:26Z | 2023-11-26T18:35:31Z |
DOC: Add note about CoW change to copy keyword docs | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5d05983529fba..b86e9e08f2cce 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -369,6 +369,18 @@
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
@@ -3728,6 +3740,18 @@ def transpose(self, *args, copy: bool = False) -> DataFrame:
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
DataFrame
@@ -5577,6 +5601,18 @@ def rename(
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
If True then value of copy is ignored.
@@ -12095,6 +12131,18 @@ def to_timestamp(
copy : bool, default True
If False then underlying input data is not copied.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
DataFrame
@@ -12161,6 +12209,18 @@ def to_period(
copy : bool, default True
If False then underlying input data is not copied.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
DataFrame
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e4e95a973a3c1..f20253cf12907 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -455,6 +455,18 @@ def set_flags(
----------
copy : bool, default False
Specify if a copy of the object should be made.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
@@ -741,7 +753,17 @@ def set_axis(
copy : bool, default True
Whether to make a copy of the underlying data.
- .. versionadded:: 1.5.0
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
Returns
-------
@@ -1172,6 +1194,18 @@ def rename_axis(
The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default None
Also copy underlying data.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
@@ -4597,6 +4631,18 @@ def reindex_like(
copy : bool, default True
Return a new object, even if the passed indexes are the same.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
@@ -5343,6 +5389,18 @@ def reindex(
copy : bool, default True
Return a new object, even if the passed indexes are the same.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
@@ -6775,6 +6833,18 @@ def infer_objects(self, copy: bool_t | None = None) -> Self:
Whether to make a copy for non-object or non-inferable columns
or Series.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
same type as input object
@@ -10077,6 +10147,18 @@ def align(
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
fill_value : scalar, default np.nan
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
@@ -11099,6 +11181,18 @@ def truncate(
copy : bool, default is True,
Return a copy of the truncated section.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
type of caller
@@ -11255,6 +11349,18 @@ def tz_convert(
copy : bool, default True
Also make a copy of the underlying data.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
{klass}
@@ -11344,6 +11450,18 @@ def tz_localize(
must be None.
copy : bool, default True
Also make a copy of the underlying data.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a9679f22f9933..1e54682498618 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4295,7 +4295,19 @@ def nsmallest(
klass=_shared_doc_kwargs["klass"],
extra_params=dedent(
"""copy : bool, default True
- Whether to copy underlying data."""
+ Whether to copy underlying data.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``"""
),
examples=dedent(
"""\
@@ -4948,6 +4960,18 @@ def rename(
Unused. Parameter needed for compatibility with DataFrame.
copy : bool, default True
Also copy underlying data.
+
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Whether to return a new Series. If True the value of copy is ignored.
level : int or level name, default None
@@ -5732,6 +5756,18 @@ def to_timestamp(
copy : bool, default True
Whether or not to return a copy.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
Series with DatetimeIndex
@@ -5784,6 +5820,18 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series
copy : bool, default True
Whether or not to return a copy.
+ .. note::
+ The `copy` keyword will change behavior in pandas 3.0.
+ `Copy-on-Write
+ <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
+ will be enabled by default, which means that all methods with a
+ `copy` keyword will use a lazy copy mechanism to defer the copy and
+ ignore the `copy` keyword. The `copy` keyword will be removed in a
+ future version of pandas.
+
+ You can already get the future behavior and improvements through
+ enabling copy on write ``pd.options.mode.copy_on_write = True``
+
Returns
-------
Series
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56162 | 2023-11-24T23:57:12Z | 2023-12-02T00:14:21Z | 2023-12-02T00:14:21Z | 2023-12-02T00:17:49Z |
DEPR: dtype inference in value_counts | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8893fe0ecd398..9318e1d9ffaff 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -396,6 +396,7 @@ Other Deprecations
- Deprecated the ``errors="ignore"`` option in :func:`to_datetime`, :func:`to_timedelta`, and :func:`to_numeric`; explicitly catch exceptions instead (:issue:`54467`)
- Deprecated the ``fastpath`` keyword in the :class:`Series` constructor (:issue:`20110`)
- Deprecated the ``ordinal`` keyword in :class:`PeriodIndex`, use :meth:`PeriodIndex.from_ordinals` instead (:issue:`55960`)
+- Deprecated the behavior of :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype; in a future version these will not perform dtype inference on the resulting :class:`Index`, do ``result.index = result.index.infer_objects()`` to retain the old behavior (:issue:`56161`)
- Deprecated the extension test classes ``BaseNoReduceTests``, ``BaseBooleanReduceTests``, and ``BaseNumericReduceTests``, use ``BaseReduceTests`` instead (:issue:`54663`)
- Deprecated the option ``mode.data_manager`` and the ``ArrayManager``; only the ``BlockManager`` will be available in future versions (:issue:`55043`)
- Deprecated the previous implementation of :class:`DataFrame.stack`; specify ``future_stack=True`` to adopt the future version (:issue:`53515`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index b38a27a9f6d0a..82de8ae96160f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -932,6 +932,16 @@ def value_counts_internal(
idx = Index(keys)
if idx.dtype == bool and keys.dtype == object:
idx = idx.astype(object)
+ elif idx.dtype != keys.dtype:
+ warnings.warn(
+ # GH#56161
+ "The behavior of value_counts with object-dtype is deprecated. "
+ "In a future version, this will *not* perform dtype inference "
+ "on the resulting index. To retain the old behavior, use "
+ "`result.index = result.index.infer_objects()`",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
idx.name = index_name
result = Series(counts, index=idx, name=name, copy=False)
@@ -1712,8 +1722,16 @@ def union_with_duplicates(
"""
from pandas import Series
- l_count = value_counts_internal(lvals, dropna=False)
- r_count = value_counts_internal(rvals, dropna=False)
+ with warnings.catch_warnings():
+ # filter warning from object dtype inference; we will end up discarding
+ # the index here, so the deprecation does not affect the end result here.
+ warnings.filterwarnings(
+ "ignore",
+ "The behavior of value_counts with object-dtype is deprecated",
+ category=FutureWarning,
+ )
+ l_count = value_counts_internal(lvals, dropna=False)
+ r_count = value_counts_internal(rvals, dropna=False)
l_count, r_count = l_count.align(r_count, fill_value=0)
final_count = np.maximum(l_count.values, r_count.values)
final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 2f1363f180d08..126484ed4a2a0 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -12,6 +12,7 @@
Union,
overload,
)
+import warnings
import numpy as np
@@ -1226,7 +1227,16 @@ def value_counts(self, dropna: bool = True) -> Series:
Series.value_counts
"""
# TODO: implement this is a non-naive way!
- return value_counts(np.asarray(self), dropna=dropna)
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ "The behavior of value_counts with object-dtype is deprecated",
+ category=FutureWarning,
+ )
+ result = value_counts(np.asarray(self), dropna=dropna)
+ # Once the deprecation is enforced, we will need to do
+ # `result.index = result.index.astype(self.dtype)`
+ return result
# ---------------------------------------------------------------------
# Rendering Methods
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
index c42d064c476bb..75915b7c67548 100644
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -336,3 +336,16 @@ def test_value_counts_with_nan(dropna, index_or_series):
else:
expected = Series([1, 1, 1], index=[True, pd.NA, np.nan], name="count")
tm.assert_series_equal(res, expected)
+
+
+def test_value_counts_object_inference_deprecated():
+ # GH#56161
+ dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
+
+ idx = dti.astype(object)
+ msg = "The behavior of value_counts with object-dtype is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = idx.value_counts()
+
+ exp = dti.value_counts()
+ tm.assert_series_equal(res, exp)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Surfaced while implementing #55564 | https://api.github.com/repos/pandas-dev/pandas/pulls/56161 | 2023-11-24T23:56:41Z | 2023-11-26T04:13:39Z | 2023-11-26T04:13:39Z | 2023-11-26T05:03:10Z |
BUG: reset_index not preserving object dtype for string option | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 723c33280a679..1c412701ae5e9 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -30,6 +30,7 @@ Bug fixes
- Fixed bug in :meth:`DataFrame.to_hdf` raising when columns have ``StringDtype`` (:issue:`55088`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Series.mode` not keeping object dtype when ``infer_string`` is set (:issue:`56183`)
+- Fixed bug in :meth:`Series.reset_index` not preserving object dtype when ``infer_string`` is set (:issue:`56160`)
- Fixed bug in :meth:`Series.str.split` and :meth:`Series.str.rsplit` when ``pat=None`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56271`)
- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 464e066b4e86a..f884e61fac27b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1719,7 +1719,7 @@ def reset_index(
return new_ser.__finalize__(self, method="reset_index")
else:
return self._constructor(
- self._values.copy(), index=new_index, copy=False
+ self._values.copy(), index=new_index, copy=False, dtype=self.dtype
).__finalize__(self, method="reset_index")
elif inplace:
raise TypeError(
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index 634b8699a89e6..48e2608a1032a 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -11,6 +11,7 @@
RangeIndex,
Series,
date_range,
+ option_context,
)
import pandas._testing as tm
@@ -167,6 +168,14 @@ def test_reset_index_inplace_and_drop_ignore_name(self):
expected = Series(range(2), name="old")
tm.assert_series_equal(ser, expected)
+ def test_reset_index_drop_infer_string(self):
+ # GH#56160
+ pytest.importorskip("pyarrow")
+ ser = Series(["a", "b", "c"], dtype=object)
+ with option_context("future.infer_string", True):
+ result = ser.reset_index(drop=True)
+ tm.assert_series_equal(result, ser)
+
@pytest.mark.parametrize(
"array, dtype",
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56160 | 2023-11-24T23:47:10Z | 2023-12-07T20:05:12Z | 2023-12-07T20:05:12Z | 2023-12-07T21:35:46Z |
Adjust tests in strings folder for new string option | diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 9fa6e9973291d..75866c6f6013a 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -44,6 +44,7 @@
)
from pandas.core.dtypes.missing import isna
+from pandas.core.arrays import ExtensionArray
from pandas.core.base import NoNewAttributesMixin
from pandas.core.construction import extract_array
@@ -456,7 +457,7 @@ def _get_series_list(self, others):
# in case of list-like `others`, all elements must be
# either Series/Index/np.ndarray (1-dim)...
if all(
- isinstance(x, (ABCSeries, ABCIndex))
+ isinstance(x, (ABCSeries, ABCIndex, ExtensionArray))
or (isinstance(x, np.ndarray) and x.ndim == 1)
for x in others
):
@@ -690,12 +691,15 @@ def cat(
out: Index | Series
if isinstance(self._orig, ABCIndex):
# add dtype for case that result is all-NA
+ dtype = None
+ if isna(result).all():
+ dtype = object
- out = Index(result, dtype=object, name=self._orig.name)
+ out = Index(result, dtype=dtype, name=self._orig.name)
else: # Series
if isinstance(self._orig.dtype, CategoricalDtype):
# We need to infer the new categories.
- dtype = None
+ dtype = self._orig.dtype.categories.dtype # type: ignore[assignment]
else:
dtype = self._orig.dtype
res_ser = Series(
@@ -914,7 +918,13 @@ def split(
if is_re(pat):
regex = True
result = self._data.array._str_split(pat, n, expand, regex)
- return self._wrap_result(result, returns_string=expand, expand=expand)
+ if self._data.dtype == "category":
+ dtype = self._data.dtype.categories.dtype
+ else:
+ dtype = object if self._data.dtype == object else None
+ return self._wrap_result(
+ result, expand=expand, returns_string=expand, dtype=dtype
+ )
@Appender(
_shared_docs["str_split"]
@@ -932,7 +942,10 @@ def split(
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, *, n=-1, expand: bool = False):
result = self._data.array._str_rsplit(pat, n=n)
- return self._wrap_result(result, expand=expand, returns_string=expand)
+ dtype = object if self._data.dtype == object else None
+ return self._wrap_result(
+ result, expand=expand, returns_string=expand, dtype=dtype
+ )
_shared_docs[
"str_partition"
@@ -1028,7 +1041,13 @@ def rsplit(self, pat=None, *, n=-1, expand: bool = False):
@forbid_nonstring_types(["bytes"])
def partition(self, sep: str = " ", expand: bool = True):
result = self._data.array._str_partition(sep, expand)
- return self._wrap_result(result, expand=expand, returns_string=expand)
+ if self._data.dtype == "category":
+ dtype = self._data.dtype.categories.dtype
+ else:
+ dtype = object if self._data.dtype == object else None
+ return self._wrap_result(
+ result, expand=expand, returns_string=expand, dtype=dtype
+ )
@Appender(
_shared_docs["str_partition"]
@@ -1042,7 +1061,13 @@ def partition(self, sep: str = " ", expand: bool = True):
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep: str = " ", expand: bool = True):
result = self._data.array._str_rpartition(sep, expand)
- return self._wrap_result(result, expand=expand, returns_string=expand)
+ if self._data.dtype == "category":
+ dtype = self._data.dtype.categories.dtype
+ else:
+ dtype = object if self._data.dtype == object else None
+ return self._wrap_result(
+ result, expand=expand, returns_string=expand, dtype=dtype
+ )
def get(self, i):
"""
@@ -2748,7 +2773,7 @@ def extract(
else:
name = _get_single_group_name(regex)
result = self._data.array._str_extract(pat, flags=flags, expand=returns_df)
- return self._wrap_result(result, name=name)
+ return self._wrap_result(result, name=name, dtype=result_dtype)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags: int = 0) -> DataFrame:
@@ -3488,7 +3513,7 @@ def str_extractall(arr, pat, flags: int = 0) -> DataFrame:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndex):
- arr = arr.to_series().reset_index(drop=True)
+ arr = arr.to_series().reset_index(drop=True).astype(arr.dtype)
columns = _get_group_names(regex)
match_list = []
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index 2914b22a52e94..31e005466af7b 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -2,11 +2,13 @@
import pytest
from pandas import (
+ CategoricalDtype,
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
+ option_context,
)
from pandas.core.strings.accessor import StringMethods
@@ -162,7 +164,8 @@ def test_api_per_method(
if inferred_dtype in allowed_types:
# xref GH 23555, GH 23556
- method(*args, **kwargs) # works!
+ with option_context("future.no_silent_downcasting", True):
+ method(*args, **kwargs) # works!
else:
# GH 23011, GH 23163
msg = (
@@ -178,6 +181,7 @@ def test_api_for_categorical(any_string_method, any_string_dtype):
s = Series(list("aabb"), dtype=any_string_dtype)
s = s + " " + s
c = s.astype("category")
+ c = c.astype(CategoricalDtype(c.dtype.categories.astype("object")))
assert isinstance(c.str, StringMethods)
method_name, args, kwargs = any_string_method
diff --git a/pandas/tests/strings/test_case_justify.py b/pandas/tests/strings/test_case_justify.py
index 1dee25e631648..41aedae90ca76 100644
--- a/pandas/tests/strings/test_case_justify.py
+++ b/pandas/tests/strings/test_case_justify.py
@@ -21,7 +21,8 @@ def test_title_mixed_object():
s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
result = s.str.title()
expected = Series(
- ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan]
+ ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_almost_equal(result, expected)
@@ -41,11 +42,15 @@ def test_lower_upper_mixed_object():
s = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = s.str.upper()
- expected = Series(["A", np.nan, "B", np.nan, np.nan, "FOO", None, np.nan, np.nan])
+ expected = Series(
+ ["A", np.nan, "B", np.nan, np.nan, "FOO", None, np.nan, np.nan], dtype=object
+ )
tm.assert_series_equal(result, expected)
result = s.str.lower()
- expected = Series(["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan])
+ expected = Series(
+ ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object
+ )
tm.assert_series_equal(result, expected)
@@ -71,7 +76,8 @@ def test_capitalize_mixed_object():
s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
result = s.str.capitalize()
expected = Series(
- ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan]
+ ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -87,7 +93,8 @@ def test_swapcase_mixed_object():
s = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0])
result = s.str.swapcase()
expected = Series(
- ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", None, np.nan, np.nan]
+ ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -138,19 +145,22 @@ def test_pad_mixed_object():
result = s.str.pad(5, side="left")
expected = Series(
- [" a", np.nan, " b", np.nan, np.nan, " ee", None, np.nan, np.nan]
+ [" a", np.nan, " b", np.nan, np.nan, " ee", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
result = s.str.pad(5, side="right")
expected = Series(
- ["a ", np.nan, "b ", np.nan, np.nan, "ee ", None, np.nan, np.nan]
+ ["a ", np.nan, "b ", np.nan, np.nan, "ee ", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
result = s.str.pad(5, side="both")
expected = Series(
- [" a ", np.nan, " b ", np.nan, np.nan, " ee ", None, np.nan, np.nan]
+ [" a ", np.nan, " b ", np.nan, np.nan, " ee ", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -238,7 +248,8 @@ def test_center_ljust_rjust_mixed_object():
None,
np.nan,
np.nan,
- ]
+ ],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -255,7 +266,8 @@ def test_center_ljust_rjust_mixed_object():
None,
np.nan,
np.nan,
- ]
+ ],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -272,7 +284,8 @@ def test_center_ljust_rjust_mixed_object():
None,
np.nan,
np.nan,
- ]
+ ],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py
index 3e620b7664335..284932491a65e 100644
--- a/pandas/tests/strings/test_cat.py
+++ b/pandas/tests/strings/test_cat.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
DataFrame,
Index,
@@ -10,6 +12,7 @@
Series,
_testing as tm,
concat,
+ option_context,
)
@@ -26,45 +29,49 @@ def test_str_cat_name(index_or_series, other):
assert result.name == "name"
-def test_str_cat(index_or_series):
- box = index_or_series
- # test_cat above tests "str_cat" from ndarray;
- # here testing "str.cat" from Series/Index to ndarray/list
- s = box(["a", "a", "b", "b", "c", np.nan])
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+def test_str_cat(index_or_series, infer_string):
+ with option_context("future.infer_string", infer_string):
+ box = index_or_series
+ # test_cat above tests "str_cat" from ndarray;
+ # here testing "str.cat" from Series/Index to ndarray/list
+ s = box(["a", "a", "b", "b", "c", np.nan])
- # single array
- result = s.str.cat()
- expected = "aabbc"
- assert result == expected
+ # single array
+ result = s.str.cat()
+ expected = "aabbc"
+ assert result == expected
- result = s.str.cat(na_rep="-")
- expected = "aabbc-"
- assert result == expected
+ result = s.str.cat(na_rep="-")
+ expected = "aabbc-"
+ assert result == expected
- result = s.str.cat(sep="_", na_rep="NA")
- expected = "a_a_b_b_c_NA"
- assert result == expected
+ result = s.str.cat(sep="_", na_rep="NA")
+ expected = "a_a_b_b_c_NA"
+ assert result == expected
- t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
- expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])
+ t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
+ expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])
- # Series/Index with array
- result = s.str.cat(t, na_rep="-")
- tm.assert_equal(result, expected)
+ # Series/Index with array
+ result = s.str.cat(t, na_rep="-")
+ tm.assert_equal(result, expected)
- # Series/Index with list
- result = s.str.cat(list(t), na_rep="-")
- tm.assert_equal(result, expected)
+ # Series/Index with list
+ result = s.str.cat(list(t), na_rep="-")
+ tm.assert_equal(result, expected)
- # errors for incorrect lengths
- rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
- z = Series(["1", "2", "3"])
+ # errors for incorrect lengths
+ rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
+ z = Series(["1", "2", "3"])
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(z.values)
+ with pytest.raises(ValueError, match=rgx):
+ s.str.cat(z.values)
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(list(z))
+ with pytest.raises(ValueError, match=rgx):
+ s.str.cat(list(z))
def test_str_cat_raises_intuitive_error(index_or_series):
@@ -78,39 +85,54 @@ def test_str_cat_raises_intuitive_error(index_or_series):
s.str.cat(" ")
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
@pytest.mark.parametrize("sep", ["", None])
@pytest.mark.parametrize("dtype_target", ["object", "category"])
@pytest.mark.parametrize("dtype_caller", ["object", "category"])
-def test_str_cat_categorical(index_or_series, dtype_caller, dtype_target, sep):
+def test_str_cat_categorical(
+ index_or_series, dtype_caller, dtype_target, sep, infer_string
+):
box = index_or_series
- s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
- s = s if box == Index else Series(s, index=s)
- t = Index(["b", "a", "b", "c"], dtype=dtype_target)
-
- expected = Index(["ab", "aa", "bb", "ac"])
- expected = expected if box == Index else Series(expected, index=s)
+ with option_context("future.infer_string", infer_string):
+ s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
+ s = s if box == Index else Series(s, index=s)
+ t = Index(["b", "a", "b", "c"], dtype=dtype_target)
- # Series/Index with unaligned Index -> t.values
- result = s.str.cat(t.values, sep=sep)
- tm.assert_equal(result, expected)
-
- # Series/Index with Series having matching Index
- t = Series(t.values, index=s)
- result = s.str.cat(t, sep=sep)
- tm.assert_equal(result, expected)
-
- # Series/Index with Series.values
- result = s.str.cat(t.values, sep=sep)
- tm.assert_equal(result, expected)
+ expected = Index(["ab", "aa", "bb", "ac"])
+ expected = (
+ expected
+ if box == Index
+ else Series(expected, index=Index(s, dtype=dtype_caller))
+ )
- # Series/Index with Series having different Index
- t = Series(t.values, index=t.values)
- expected = Index(["aa", "aa", "bb", "bb", "aa"])
- expected = expected if box == Index else Series(expected, index=expected.str[:1])
+ # Series/Index with unaligned Index -> t.values
+ result = s.str.cat(t.values, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series having matching Index
+ t = Series(t.values, index=Index(s, dtype=dtype_caller))
+ result = s.str.cat(t, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series.values
+ result = s.str.cat(t.values, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series having different Index
+ t = Series(t.values, index=t.values)
+ expected = Index(["aa", "aa", "bb", "bb", "aa"])
+ dtype = object if dtype_caller == "object" else s.dtype.categories.dtype
+ expected = (
+ expected
+ if box == Index
+ else Series(expected, index=Index(expected.str[:1], dtype=dtype))
+ )
- result = s.str.cat(t, sep=sep)
- tm.assert_equal(result, expected)
+ result = s.str.cat(t, sep=sep)
+ tm.assert_equal(result, expected)
@pytest.mark.parametrize(
@@ -321,8 +343,9 @@ def test_str_cat_all_na(index_or_series, index_or_series2):
# all-NA target
if box == Series:
- expected = Series([np.nan] * 4, index=s.index, dtype=object)
+ expected = Series([np.nan] * 4, index=s.index, dtype=s.dtype)
else: # box == Index
+ # TODO: Strimg option, this should return string dtype
expected = Index([np.nan] * 4, dtype=object)
result = s.str.cat(t, join="left")
tm.assert_equal(result, expected)
diff --git a/pandas/tests/strings/test_extract.py b/pandas/tests/strings/test_extract.py
index 9ad9b1eca41d9..77d008c650264 100644
--- a/pandas/tests/strings/test_extract.py
+++ b/pandas/tests/strings/test_extract.py
@@ -47,13 +47,16 @@ def test_extract_expand_False_mixed_object():
# two groups
result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
er = [np.nan, np.nan] # empty row
- expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
+ expected = DataFrame(
+ [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object
+ )
tm.assert_frame_equal(result, expected)
# single group
result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False)
expected = Series(
- ["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, None, np.nan, np.nan]
+ ["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -238,7 +241,9 @@ def test_extract_expand_True_mixed_object():
)
result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
- expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
+ expected = DataFrame(
+ [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object
+ )
tm.assert_frame_equal(result, expected)
@@ -603,8 +608,8 @@ def test_extractall_stringindex(any_string_dtype):
# index.name doesn't affect to the result
if any_string_dtype == "object":
for idx in [
- Index(["a1a2", "b1", "c1"]),
- Index(["a1a2", "b1", "c1"], name="xxx"),
+ Index(["a1a2", "b1", "c1"], dtype=object),
+ Index(["a1a2", "b1", "c1"], name="xxx", dtype=object),
]:
result = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index bd64a5dce3b9a..3f58c6d703f8f 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -242,7 +242,7 @@ def test_contains_nan(any_string_dtype):
@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")])
-@pytest.mark.parametrize("dtype", [None, "category"])
+@pytest.mark.parametrize("dtype", ["object", "category"])
@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])
@pytest.mark.parametrize("na", [True, False])
def test_startswith(pat, dtype, null_value, na):
@@ -254,10 +254,10 @@ def test_startswith(pat, dtype, null_value, na):
result = values.str.startswith(pat)
exp = Series([False, np.nan, True, False, False, np.nan, True])
- if dtype is None and null_value is pd.NA:
+ if dtype == "object" and null_value is pd.NA:
# GH#18463
exp = exp.fillna(null_value)
- elif dtype is None and null_value is None:
+ elif dtype == "object" and null_value is None:
exp[exp.isna()] = None
tm.assert_series_equal(result, exp)
@@ -300,7 +300,7 @@ def test_startswith_nullable_string_dtype(nullable_string_dtype, na):
@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")])
-@pytest.mark.parametrize("dtype", [None, "category"])
+@pytest.mark.parametrize("dtype", ["object", "category"])
@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])
@pytest.mark.parametrize("na", [True, False])
def test_endswith(pat, dtype, null_value, na):
@@ -312,10 +312,10 @@ def test_endswith(pat, dtype, null_value, na):
result = values.str.endswith(pat)
exp = Series([False, np.nan, False, False, True, np.nan, True])
- if dtype is None and null_value is pd.NA:
+ if dtype == "object" and null_value is pd.NA:
# GH#18463
- exp = exp.fillna(pd.NA)
- elif dtype is None and null_value is None:
+ exp = exp.fillna(null_value)
+ elif dtype == "object" and null_value is None:
exp[exp.isna()] = None
tm.assert_series_equal(result, exp)
@@ -382,7 +382,9 @@ def test_replace_mixed_object():
["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]
)
result = Series(ser).str.replace("BAD[_]*", "", regex=True)
- expected = Series(["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan])
+ expected = Series(
+ ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object
+ )
tm.assert_series_equal(result, expected)
@@ -469,7 +471,9 @@ def test_replace_compiled_regex_mixed_object():
["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]
)
result = Series(ser).str.replace(pat, "", regex=True)
- expected = Series(["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan])
+ expected = Series(
+ ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object
+ )
tm.assert_series_equal(result, expected)
@@ -913,7 +917,7 @@ def test_translate_mixed_object():
# Series with non-string values
s = Series(["a", "b", "c", 1.2])
table = str.maketrans("abc", "cde")
- expected = Series(["c", "d", "e", np.nan])
+ expected = Series(["c", "d", "e", np.nan], dtype=object)
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py
index 0a7d409773dd6..9ff1fc0e13ae9 100644
--- a/pandas/tests/strings/test_split_partition.py
+++ b/pandas/tests/strings/test_split_partition.py
@@ -681,14 +681,16 @@ def test_partition_sep_kwarg(any_string_dtype, method):
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
- expected = Series(["b", "d", np.nan, "g"])
+ expected = Series(["b", "d", np.nan, "g"], dtype=object)
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
- expected = Series(["b", np.nan, "d", np.nan, np.nan, None, np.nan, np.nan])
+ expected = Series(
+ ["b", np.nan, "d", np.nan, np.nan, None, np.nan, np.nan], dtype=object
+ )
tm.assert_series_equal(result, expected)
@@ -696,7 +698,7 @@ def test_get_mixed_object():
def test_get_bounds(idx):
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
result = ser.str.split("_").str.get(idx)
- expected = Series(["3", "8", np.nan])
+ expected = Series(["3", "8", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/strings/test_string_array.py b/pandas/tests/strings/test_string_array.py
index a88dcc8956931..0b3f368afea5e 100644
--- a/pandas/tests/strings/test_string_array.py
+++ b/pandas/tests/strings/test_string_array.py
@@ -8,6 +8,7 @@
DataFrame,
Series,
_testing as tm,
+ option_context,
)
@@ -56,7 +57,8 @@ def test_string_array(nullable_string_dtype, any_string_method):
columns = expected.select_dtypes(include="object").columns
assert all(result[columns].dtypes == nullable_string_dtype)
result[columns] = result[columns].astype(object)
- expected[columns] = expected[columns].fillna(NA) # GH#18463
+ with option_context("future.no_silent_downcasting", True):
+ expected[columns] = expected[columns].fillna(NA) # GH#18463
tm.assert_equal(result, expected)
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 4315835b70a40..f662dfd7e2b14 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -76,7 +76,8 @@ def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
- ["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", None, np.nan, np.nan]
+ ["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -270,7 +271,8 @@ def test_spilt_join_roundtrip_mixed_object():
)
result = ser.str.split("_").str.join("_")
expected = Series(
- ["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", None, np.nan, np.nan]
+ ["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", None, np.nan, np.nan],
+ dtype=object,
)
tm.assert_series_equal(result, expected)
@@ -398,7 +400,7 @@ def test_slice(start, stop, step, expected, any_string_dtype):
def test_slice_mixed_object(start, stop, step, expected):
ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0])
result = ser.str.slice(start, stop, step)
- expected = Series(expected)
+ expected = Series(expected, dtype=object)
tm.assert_series_equal(result, expected)
@@ -453,7 +455,7 @@ def test_strip_lstrip_rstrip_mixed_object(method, exp):
ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0])
result = getattr(ser.str, method)()
- expected = Series(exp + [np.nan, np.nan, None, np.nan, np.nan])
+ expected = Series(exp + [np.nan, np.nan, None, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
@@ -529,7 +531,7 @@ def test_string_slice_out_of_bounds(any_string_dtype):
def test_encode_decode(any_string_dtype):
ser = Series(["a", "b", "a\xe4"], dtype=any_string_dtype).str.encode("utf-8")
result = ser.str.decode("utf-8")
- expected = ser.map(lambda x: x.decode("utf-8"))
+ expected = ser.map(lambda x: x.decode("utf-8")).astype(object)
tm.assert_series_equal(result, expected)
@@ -559,7 +561,7 @@ def test_decode_errors_kwarg():
ser.str.decode("cp1252")
result = ser.str.decode("cp1252", "ignore")
- expected = ser.map(lambda x: x.decode("cp1252", "ignore"))
+ expected = ser.map(lambda x: x.decode("cp1252", "ignore")).astype(object)
tm.assert_series_equal(result, expected)
@@ -672,7 +674,7 @@ def test_str_accessor_in_apply_func():
def test_zfill():
# https://github.com/pandas-dev/pandas/issues/20868
value = Series(["-1", "1", "1000", 10, np.nan])
- expected = Series(["-01", "001", "1000", np.nan, np.nan])
+ expected = Series(["-01", "001", "1000", np.nan, np.nan], dtype=object)
tm.assert_series_equal(value.str.zfill(3), expected)
value = Series(["-2", "+5"])
@@ -704,10 +706,10 @@ def test_get_with_dict_label():
]
)
result = s.str.get("name")
- expected = Series(["Hello", "Goodbye", None])
+ expected = Series(["Hello", "Goodbye", None], dtype=object)
tm.assert_series_equal(result, expected)
result = s.str.get("value")
- expected = Series(["World", "Planet", "Sea"])
+ expected = Series(["World", "Planet", "Sea"], dtype=object)
tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56159 | 2023-11-24T23:40:32Z | 2023-12-09T19:24:31Z | 2023-12-09T19:24:31Z | 2023-12-09T19:38:49Z |
BUG: round with non-nanosecond raising OverflowError | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 8893fe0ecd398..494d1931d9fa7 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -452,6 +452,7 @@ Datetimelike
- Bug in :meth:`DatetimeIndex.union` returning object dtype for tz-aware indexes with the same timezone but different units (:issue:`55238`)
- Bug in :meth:`Index.is_monotonic_increasing` and :meth:`Index.is_monotonic_decreasing` always caching :meth:`Index.is_unique` as ``True`` when first value in index is ``NaT`` (:issue:`55755`)
- Bug in :meth:`Index.view` to a datetime64 dtype with non-supported resolution incorrectly raising (:issue:`55710`)
+- Bug in :meth:`Series.dt.round` with non-nanosecond resolution and ``NaT`` entries incorrectly raising ``OverflowError`` (:issue:`56158`)
- Bug in :meth:`Tick.delta` with very large ticks raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`)
- Bug in ``.astype`` converting from a higher-resolution ``datetime64`` dtype to a lower-resolution ``datetime64`` dtype (e.g. ``datetime64[us]->datetim64[ms]``) silently overflowing with values near the lower implementation bound (:issue:`55979`)
- Bug in adding or subtracting a :class:`Week` offset to a ``datetime64`` :class:`Series`, :class:`Index`, or :class:`DataFrame` column with non-nanosecond resolution returning incorrect results (:issue:`55583`)
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index a726c735bf9a1..ff4fb4d635d17 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -746,7 +746,31 @@ cdef ndarray[int64_t] _ceil_int64(const int64_t[:] values, int64_t unit):
cdef ndarray[int64_t] _rounddown_int64(values, int64_t unit):
- return _ceil_int64(values - unit // 2, unit)
+ cdef:
+ Py_ssize_t i, n = len(values)
+ ndarray[int64_t] result = np.empty(n, dtype="i8")
+ int64_t res, value, remainder, half
+
+ half = unit // 2
+
+ with cython.overflowcheck(True):
+ for i in range(n):
+ value = values[i]
+
+ if value == NPY_NAT:
+ res = NPY_NAT
+ else:
+ # This adjustment is the only difference between rounddown_int64
+ # and _ceil_int64
+ value = value - half
+ remainder = value % unit
+ if remainder == 0:
+ res = value
+ else:
+ res = value + (unit - remainder)
+
+ result[i] = res
+ return result
cdef ndarray[int64_t] _roundup_int64(values, int64_t unit):
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
index 6c40e36419551..7f60c94f10e4f 100644
--- a/pandas/tests/series/methods/test_round.py
+++ b/pandas/tests/series/methods/test_round.py
@@ -56,9 +56,10 @@ def test_round_builtin(self, any_float_dtype):
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
- def test_round_nat(self, method, freq):
- # GH14940
- ser = Series([pd.NaT])
- expected = Series(pd.NaT)
+ def test_round_nat(self, method, freq, unit):
+ # GH14940, GH#56158
+ ser = Series([pd.NaT], dtype=f"M8[{unit}]")
+ expected = Series(pd.NaT, dtype=f"M8[{unit}]")
round_method = getattr(ser.dt, method)
- tm.assert_series_equal(round_method(freq), expected)
+ result = round_method(freq)
+ tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56158 | 2023-11-24T23:08:11Z | 2023-11-26T04:23:38Z | 2023-11-26T04:23:38Z | 2023-11-26T05:00:13Z |
BUG: Index.str.cat casting result always to object | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index c878fd2664dc4..99faad8aff986 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -576,6 +576,7 @@ Strings
^^^^^^^
- Bug in :func:`pandas.api.types.is_string_dtype` while checking object array with no elements is of the string dtype (:issue:`54661`)
- Bug in :meth:`DataFrame.apply` failing when ``engine="numba"`` and columns or index have ``StringDtype`` (:issue:`56189`)
+- Bug in :meth:`Index.str.cat` always casting result to object dtype (:issue:`56157`)
- Bug in :meth:`Series.__mul__` for :class:`ArrowDtype` with ``pyarrow.string`` dtype and ``string[pyarrow]`` for the pyarrow backend (:issue:`51970`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for ``string[pyarrow]`` (:issue:`54942`)
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 9fa6e9973291d..127aee24e094f 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -44,6 +44,7 @@
)
from pandas.core.dtypes.missing import isna
+from pandas.core.arrays import ExtensionArray
from pandas.core.base import NoNewAttributesMixin
from pandas.core.construction import extract_array
@@ -456,7 +457,7 @@ def _get_series_list(self, others):
# in case of list-like `others`, all elements must be
# either Series/Index/np.ndarray (1-dim)...
if all(
- isinstance(x, (ABCSeries, ABCIndex))
+ isinstance(x, (ABCSeries, ABCIndex, ExtensionArray))
or (isinstance(x, np.ndarray) and x.ndim == 1)
for x in others
):
@@ -690,12 +691,15 @@ def cat(
out: Index | Series
if isinstance(self._orig, ABCIndex):
# add dtype for case that result is all-NA
+ dtype = None
+ if isna(result).all():
+ dtype = object
- out = Index(result, dtype=object, name=self._orig.name)
+ out = Index(result, dtype=dtype, name=self._orig.name)
else: # Series
if isinstance(self._orig.dtype, CategoricalDtype):
# We need to infer the new categories.
- dtype = None
+ dtype = self._orig.dtype.categories.dtype # type: ignore[assignment]
else:
dtype = self._orig.dtype
res_ser = Series(
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index 2914b22a52e94..fd2501835318d 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -2,6 +2,7 @@
import pytest
from pandas import (
+ CategoricalDtype,
DataFrame,
Index,
MultiIndex,
@@ -178,6 +179,7 @@ def test_api_for_categorical(any_string_method, any_string_dtype):
s = Series(list("aabb"), dtype=any_string_dtype)
s = s + " " + s
c = s.astype("category")
+ c = c.astype(CategoricalDtype(c.dtype.categories.astype("object")))
assert isinstance(c.str, StringMethods)
method_name, args, kwargs = any_string_method
diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py
index 3e620b7664335..284932491a65e 100644
--- a/pandas/tests/strings/test_cat.py
+++ b/pandas/tests/strings/test_cat.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
DataFrame,
Index,
@@ -10,6 +12,7 @@
Series,
_testing as tm,
concat,
+ option_context,
)
@@ -26,45 +29,49 @@ def test_str_cat_name(index_or_series, other):
assert result.name == "name"
-def test_str_cat(index_or_series):
- box = index_or_series
- # test_cat above tests "str_cat" from ndarray;
- # here testing "str.cat" from Series/Index to ndarray/list
- s = box(["a", "a", "b", "b", "c", np.nan])
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+def test_str_cat(index_or_series, infer_string):
+ with option_context("future.infer_string", infer_string):
+ box = index_or_series
+ # test_cat above tests "str_cat" from ndarray;
+ # here testing "str.cat" from Series/Index to ndarray/list
+ s = box(["a", "a", "b", "b", "c", np.nan])
- # single array
- result = s.str.cat()
- expected = "aabbc"
- assert result == expected
+ # single array
+ result = s.str.cat()
+ expected = "aabbc"
+ assert result == expected
- result = s.str.cat(na_rep="-")
- expected = "aabbc-"
- assert result == expected
+ result = s.str.cat(na_rep="-")
+ expected = "aabbc-"
+ assert result == expected
- result = s.str.cat(sep="_", na_rep="NA")
- expected = "a_a_b_b_c_NA"
- assert result == expected
+ result = s.str.cat(sep="_", na_rep="NA")
+ expected = "a_a_b_b_c_NA"
+ assert result == expected
- t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
- expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])
+ t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)
+ expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])
- # Series/Index with array
- result = s.str.cat(t, na_rep="-")
- tm.assert_equal(result, expected)
+ # Series/Index with array
+ result = s.str.cat(t, na_rep="-")
+ tm.assert_equal(result, expected)
- # Series/Index with list
- result = s.str.cat(list(t), na_rep="-")
- tm.assert_equal(result, expected)
+ # Series/Index with list
+ result = s.str.cat(list(t), na_rep="-")
+ tm.assert_equal(result, expected)
- # errors for incorrect lengths
- rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
- z = Series(["1", "2", "3"])
+ # errors for incorrect lengths
+ rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
+ z = Series(["1", "2", "3"])
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(z.values)
+ with pytest.raises(ValueError, match=rgx):
+ s.str.cat(z.values)
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(list(z))
+ with pytest.raises(ValueError, match=rgx):
+ s.str.cat(list(z))
def test_str_cat_raises_intuitive_error(index_or_series):
@@ -78,39 +85,54 @@ def test_str_cat_raises_intuitive_error(index_or_series):
s.str.cat(" ")
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
@pytest.mark.parametrize("sep", ["", None])
@pytest.mark.parametrize("dtype_target", ["object", "category"])
@pytest.mark.parametrize("dtype_caller", ["object", "category"])
-def test_str_cat_categorical(index_or_series, dtype_caller, dtype_target, sep):
+def test_str_cat_categorical(
+ index_or_series, dtype_caller, dtype_target, sep, infer_string
+):
box = index_or_series
- s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
- s = s if box == Index else Series(s, index=s)
- t = Index(["b", "a", "b", "c"], dtype=dtype_target)
-
- expected = Index(["ab", "aa", "bb", "ac"])
- expected = expected if box == Index else Series(expected, index=s)
+ with option_context("future.infer_string", infer_string):
+ s = Index(["a", "a", "b", "a"], dtype=dtype_caller)
+ s = s if box == Index else Series(s, index=s)
+ t = Index(["b", "a", "b", "c"], dtype=dtype_target)
- # Series/Index with unaligned Index -> t.values
- result = s.str.cat(t.values, sep=sep)
- tm.assert_equal(result, expected)
-
- # Series/Index with Series having matching Index
- t = Series(t.values, index=s)
- result = s.str.cat(t, sep=sep)
- tm.assert_equal(result, expected)
-
- # Series/Index with Series.values
- result = s.str.cat(t.values, sep=sep)
- tm.assert_equal(result, expected)
+ expected = Index(["ab", "aa", "bb", "ac"])
+ expected = (
+ expected
+ if box == Index
+ else Series(expected, index=Index(s, dtype=dtype_caller))
+ )
- # Series/Index with Series having different Index
- t = Series(t.values, index=t.values)
- expected = Index(["aa", "aa", "bb", "bb", "aa"])
- expected = expected if box == Index else Series(expected, index=expected.str[:1])
+ # Series/Index with unaligned Index -> t.values
+ result = s.str.cat(t.values, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series having matching Index
+ t = Series(t.values, index=Index(s, dtype=dtype_caller))
+ result = s.str.cat(t, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series.values
+ result = s.str.cat(t.values, sep=sep)
+ tm.assert_equal(result, expected)
+
+ # Series/Index with Series having different Index
+ t = Series(t.values, index=t.values)
+ expected = Index(["aa", "aa", "bb", "bb", "aa"])
+ dtype = object if dtype_caller == "object" else s.dtype.categories.dtype
+ expected = (
+ expected
+ if box == Index
+ else Series(expected, index=Index(expected.str[:1], dtype=dtype))
+ )
- result = s.str.cat(t, sep=sep)
- tm.assert_equal(result, expected)
+ result = s.str.cat(t, sep=sep)
+ tm.assert_equal(result, expected)
@pytest.mark.parametrize(
@@ -321,8 +343,9 @@ def test_str_cat_all_na(index_or_series, index_or_series2):
# all-NA target
if box == Series:
- expected = Series([np.nan] * 4, index=s.index, dtype=object)
+ expected = Series([np.nan] * 4, index=s.index, dtype=s.dtype)
else: # box == Index
+ # TODO: Strimg option, this should return string dtype
expected = Index([np.nan] * 4, dtype=object)
result = s.str.cat(t, join="left")
tm.assert_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56157 | 2023-11-24T22:52:01Z | 2023-12-08T23:28:19Z | 2023-12-08T23:28:19Z | 2023-12-08T23:28:46Z |
BUG fixes for date_range boundaries | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 129f5cedb86c2..b9a066c3b88dd 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -892,6 +892,8 @@ Other
- Bug in :func:`DataFrame.describe` when formatting percentiles in the resulting percentile 99.999% is rounded to 100% (:issue:`55765`)
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
+- Bug in :func:`date_range` where a timestamp out of the valid range would be produced with a negative ``freq`` parameter (:issue:`56147`)
+- Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`)
- Bug in :func:`infer_freq` and :meth:`DatetimeIndex.inferred_freq` with weekly frequencies and non-nanosecond resolutions (:issue:`55609`)
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
- Bug in :meth:`DataFrame.from_dict` which would always sort the rows of the created :class:`DataFrame`. (:issue:`55683`)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 6b7ddc4a72957..0367656254961 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2763,12 +2763,12 @@ def _generate_range(
if start and not offset.is_on_offset(start):
# Incompatible types in assignment (expression has type "datetime",
# variable has type "Optional[Timestamp]")
- start = offset.rollforward(start) # type: ignore[assignment]
- elif end and not offset.is_on_offset(end):
- # Incompatible types in assignment (expression has type "datetime",
- # variable has type "Optional[Timestamp]")
- end = offset.rollback(end) # type: ignore[assignment]
+ # GH #56147 account for negative direction and range bounds
+ if offset.n >= 0:
+ start = offset.rollforward(start) # type: ignore[assignment]
+ else:
+ start = offset.rollback(start) # type: ignore[assignment]
# Unsupported operand types for < ("Timestamp" and "None")
if periods is None and end < start and offset.n >= 0: # type: ignore[operator]
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 44dd64e162413..5ca83aadfca29 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -1539,6 +1539,21 @@ def test_date_range_year_end(self, unit):
)
tm.assert_index_equal(rng, exp)
+ def test_date_range_partial_day_year_end(self, unit):
+ # GH#56134
+ rng = date_range(
+ start="2021-12-31 00:00:01",
+ end="2023-10-31 00:00:00",
+ freq="YE",
+ unit=unit,
+ )
+ exp = DatetimeIndex(
+ ["2021-12-31 00:00:01", "2022-12-31 00:00:01"],
+ dtype=f"M8[{unit}]",
+ freq="YE",
+ )
+ tm.assert_index_equal(rng, exp)
+
def test_date_range_negative_freq_year_end(self, unit):
# GH#11018
rng = date_range("2011-12-31", freq="-2YE", periods=3, unit=unit)
@@ -1548,6 +1563,21 @@ def test_date_range_negative_freq_year_end(self, unit):
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2YE"
+ def test_date_range_negative_freq_year_end_inbounds(self, unit):
+ # GH#56147
+ rng = date_range(
+ start="2023-10-31 00:00:00",
+ end="2021-10-31 00:00:00",
+ freq="-1YE",
+ unit=unit,
+ )
+ exp = DatetimeIndex(
+ ["2022-12-31 00:00:00", "2021-12-31 00:00:00"],
+ dtype=f"M8[{unit}]",
+ freq="-1YE",
+ )
+ tm.assert_index_equal(rng, exp)
+
def test_date_range_business_year_end_year(self, unit):
# see GH#9313
rng = date_range("1/1/2013", "7/1/2017", freq="BYE", unit=unit)
| - [X] closes #56134
- [X] closes #56147
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Fix for two issues with date_range where:
* A valid timestamp in range would be accidentally dropped when using year end and the starting timestamp was not midnight (56134)
* A negative year end offset and start increment at a date earlier than year end would include the year end of the starting date (despite being out of bounds of start and end) | https://api.github.com/repos/pandas-dev/pandas/pulls/56156 | 2023-11-24T21:34:29Z | 2024-01-11T11:19:01Z | null | 2024-01-11T11:19:25Z |
TST/CLN: Remove makeStringIndex | diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 3c78b0a9a60c8..933e8fbc175d8 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -4,8 +4,6 @@
import pandas as pd
-from .pandas_vb_common import tm
-
for imp in ["pandas.util", "pandas.tools.hashing"]:
try:
hashing = import_module(imp)
@@ -47,9 +45,12 @@ def setup(self, unique, sort, dtype):
elif dtype == "datetime64[ns, tz]":
data = pd.date_range("2011-01-01", freq="h", periods=N, tz="Asia/Tokyo")
elif dtype == "object_str":
- data = tm.makeStringIndex(N)
+ data = pd.Index([f"i-{i}" for i in range(N)], dtype=object)
elif dtype == "string[pyarrow]":
- data = pd.array(tm.makeStringIndex(N), dtype="string[pyarrow]")
+ data = pd.array(
+ pd.Index([f"i-{i}" for i in range(N)], dtype=object),
+ dtype="string[pyarrow]",
+ )
else:
raise NotImplementedError
@@ -88,7 +89,7 @@ def setup(self, unique, keep, dtype):
elif dtype == "float64":
data = pd.Index(np.random.randn(N), dtype="float64")
elif dtype == "string":
- data = tm.makeStringIndex(N)
+ data = pd.Index([f"i-{i}" for i in range(N)], dtype=object)
elif dtype == "datetime64[ns]":
data = pd.date_range("2011-01-01", freq="h", periods=N)
elif dtype == "datetime64[ns, tz]":
@@ -136,7 +137,9 @@ def setup_cache(self):
df = pd.DataFrame(
{
"strings": pd.Series(
- tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))
+ pd.Index([f"i-{i}" for i in range(10000)], dtype=object).take(
+ np.random.randint(0, 10000, size=N)
+ )
),
"floats": np.random.randn(N),
"ints": np.arange(N),
diff --git a/asv_bench/benchmarks/algos/isin.py b/asv_bench/benchmarks/algos/isin.py
index 92797425b2c30..2b3d32fb579dc 100644
--- a/asv_bench/benchmarks/algos/isin.py
+++ b/asv_bench/benchmarks/algos/isin.py
@@ -8,8 +8,6 @@
date_range,
)
-from ..pandas_vb_common import tm
-
class IsIn:
params = [
@@ -60,7 +58,9 @@ def setup(self, dtype):
elif dtype in ["str", "string[python]", "string[pyarrow]"]:
try:
- self.series = Series(tm.makeStringIndex(N), dtype=dtype)
+ self.series = Series(
+ Index([f"i-{i}" for i in range(N)], dtype=object), dtype=dtype
+ )
except ImportError:
raise NotImplementedError
self.values = list(self.series[:2])
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 2db00cc7f2ad9..77c9faf3d3a87 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -9,8 +9,6 @@
date_range,
)
-from .pandas_vb_common import tm
-
def no_change(arr):
return arr
@@ -115,7 +113,7 @@ def time_dtindex_from_index_with_series(self):
class MultiIndexConstructor:
def setup(self):
N = 10**4
- self.iterables = [tm.makeStringIndex(N), range(20)]
+ self.iterables = [Index([f"i-{i}" for i in range(N)], dtype=object), range(20)]
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py
index c33043c0eddc1..7f3429b5e3882 100644
--- a/asv_bench/benchmarks/dtypes.py
+++ b/asv_bench/benchmarks/dtypes.py
@@ -3,7 +3,10 @@
import numpy as np
import pandas as pd
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ Index,
+)
import pandas._testing as tm
from pandas.api.types import (
is_extension_array_dtype,
@@ -73,8 +76,8 @@ class SelectDtypes:
def setup(self, dtype):
N, K = 5000, 50
- self.index = tm.makeStringIndex(N)
- self.columns = tm.makeStringIndex(K)
+ self.index = Index([f"i-{i}" for i in range(N)], dtype=object)
+ self.columns = Index([f"i-{i}" for i in range(K)], dtype=object)
def create_df(data):
return DataFrame(data, index=self.index, columns=self.columns)
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 7092a679b8cf0..f938f7eb0d951 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -12,8 +12,6 @@
date_range,
)
-from .pandas_vb_common import tm
-
try:
from pandas.tseries.offsets import (
Hour,
@@ -30,8 +28,8 @@
class FromDicts:
def setup(self):
N, K = 5000, 50
- self.index = tm.makeStringIndex(N)
- self.columns = tm.makeStringIndex(K)
+ self.index = pd.Index([f"i-{i}" for i in range(N)], dtype=object)
+ self.columns = pd.Index([f"i-{i}" for i in range(K)], dtype=object)
frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
self.data = frame.to_dict()
self.dict_list = frame.to_dict(orient="records")
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index f22a261041e17..a283afd1f0f1e 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -5,6 +5,7 @@
from pandas import (
DataFrame,
+ Index,
MultiIndex,
NaT,
Series,
@@ -14,8 +15,6 @@
timedelta_range,
)
-from .pandas_vb_common import tm
-
class AsType:
params = [
@@ -703,8 +702,12 @@ def setup(self, monotonic):
K = 10
df = DataFrame(
{
- "key1": tm.makeStringIndex(N).values.repeat(K),
- "key2": tm.makeStringIndex(N).values.repeat(K),
+ "key1": Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(
+ K
+ ),
+ "key2": Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(
+ K
+ ),
"value": np.random.randn(N * K),
}
)
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index c78819f75c52a..a0c4189c72d0e 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -5,6 +5,7 @@
from pandas import (
DataFrame,
+ Index,
Series,
date_range,
factorize,
@@ -12,8 +13,6 @@
)
from pandas.core.algorithms import take_nd
-from .pandas_vb_common import tm
-
try:
from pandas import (
rolling_kurt,
@@ -34,7 +33,6 @@
except ImportError:
from pandas import algos
-
from .pandas_vb_common import BaseIO # isort:skip
@@ -305,7 +303,7 @@ class ParallelFactorize:
param_names = ["threads"]
def setup(self, threads):
- strings = tm.makeStringIndex(100000)
+ strings = Index([f"i-{i}" for i in range(100000)], dtype=object)
@test_parallel(num_threads=threads)
def parallel():
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 202ba6e981b70..abffa1f702b9c 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -17,8 +17,6 @@
to_timedelta,
)
-from .pandas_vb_common import tm
-
method_blocklist = {
"object": {
"diff",
@@ -167,10 +165,14 @@ def setup_cache(self):
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
- tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
+ Index([f"i-{i}" for i in range(100)], dtype=object).take(
+ np.random.randint(0, 100, size=size)
+ )
),
"object_large": Series(
- tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size))
+ Index([f"i-{i}" for i in range(10000)], dtype=object).take(
+ np.random.randint(0, 10000, size=size)
+ )
),
}
return data
@@ -912,7 +914,7 @@ def setup(self):
n1 = 400
n2 = 250
index = MultiIndex(
- levels=[np.arange(n1), tm.makeStringIndex(n2)],
+ levels=[np.arange(n1), Index([f"i-{i}" for i in range(n2)], dtype=object)],
codes=[np.repeat(range(n1), n2).tolist(), list(range(n2)) * n1],
names=["lev1", "lev2"],
)
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index 7e33223260e0f..637b1b40f59a3 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -12,8 +12,6 @@
date_range,
)
-from .pandas_vb_common import tm
-
class SetOperations:
params = (
@@ -30,7 +28,7 @@ def setup(self, index_structure, dtype, method):
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
ea_int_left = Index(np.arange(N), dtype="Int64")
- str_left = tm.makeStringIndex(N)
+ str_left = Index([f"i-{i}" for i in range(N)], dtype=object)
data = {
"datetime": dates_left,
@@ -155,7 +153,12 @@ class Indexing:
def setup(self, dtype):
N = 10**6
- self.idx = getattr(tm, f"make{dtype}Index")(N)
+ if dtype == "String":
+ self.idx = Index([f"i-{i}" for i in range(N)], dtype=object)
+ elif dtype == "Float":
+ self.idx = Index(np.arange(N), dtype=np.float64)
+ elif dtype == "Int":
+ self.idx = Index(np.arange(N), dtype=np.int64)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 4961722c0e9cd..9ad1f5b31016d 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -22,8 +22,6 @@
period_range,
)
-from .pandas_vb_common import tm
-
class NumericSeriesIndexing:
params = [
@@ -124,7 +122,7 @@ class NonNumericSeriesIndexing:
def setup(self, index, index_structure):
N = 10**6
if index == "string":
- index = tm.makeStringIndex(N)
+ index = Index([f"i-{i}" for i in range(N)], dtype=object)
elif index == "datetime":
index = date_range("1900", periods=N, freq="s")
elif index == "period":
@@ -156,8 +154,8 @@ def time_getitem_list_like(self, index, index_structure):
class DataFrameStringIndexing:
def setup(self):
- index = tm.makeStringIndex(1000)
- columns = tm.makeStringIndex(30)
+ index = Index([f"i-{i}" for i in range(1000)], dtype=object)
+ columns = Index([f"i-{i}" for i in range(30)], dtype=object)
with warnings.catch_warnings(record=True):
self.df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns)
self.idx_scalar = index[100]
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 805b0c807452c..d5c58033c1157 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -9,6 +9,7 @@
import numpy as np
from pandas import (
+ Index,
NaT,
Series,
date_range,
@@ -17,10 +18,7 @@
to_timedelta,
)
-from .pandas_vb_common import (
- lib,
- tm,
-)
+from .pandas_vb_common import lib
class ToNumeric:
@@ -31,7 +29,7 @@ def setup(self, errors):
N = 10000
self.float = Series(np.random.randn(N))
self.numstr = self.float.astype("str")
- self.str = Series(tm.makeStringIndex(N))
+ self.str = Series(Index([f"i-{i}" for i in range(N)], dtype=object))
def time_from_float(self, errors):
to_numeric(self.float, errors=errors)
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index a45315f63d62e..9ac83db4f85b9 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -10,6 +10,7 @@
from pandas import (
Categorical,
DataFrame,
+ Index,
concat,
date_range,
period_range,
@@ -17,10 +18,7 @@
to_datetime,
)
-from ..pandas_vb_common import (
- BaseIO,
- tm,
-)
+from ..pandas_vb_common import BaseIO
class ToCSV(BaseIO):
@@ -288,7 +286,7 @@ class ReadCSVSkipRows(BaseIO):
def setup(self, skiprows, engine):
N = 20000
- index = tm.makeStringIndex(N)
+ index = Index([f"i-{i}" for i in range(N)], dtype=object)
df = DataFrame(
{
"float1": np.random.randn(N),
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index f8d81b0f6a699..902a61be901bd 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -12,12 +12,11 @@
from pandas import (
DataFrame,
ExcelWriter,
+ Index,
date_range,
read_excel,
)
-from ..pandas_vb_common import tm
-
def _generate_dataframe():
N = 2000
@@ -27,7 +26,7 @@ def _generate_dataframe():
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="h"),
)
- df["object"] = tm.makeStringIndex(N)
+ df["object"] = Index([f"i-{i}" for i in range(N)], dtype=object)
return df
diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index 195aaa158e178..acf0ec4b9d359 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -3,20 +3,18 @@
from pandas import (
DataFrame,
HDFStore,
+ Index,
date_range,
read_hdf,
)
-from ..pandas_vb_common import (
- BaseIO,
- tm,
-)
+from ..pandas_vb_common import BaseIO
class HDFStoreDataFrame(BaseIO):
def setup(self):
N = 25000
- index = tm.makeStringIndex(N)
+ index = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(
{"float1": np.random.randn(N), "float2": np.random.randn(N)}, index=index
)
@@ -124,7 +122,7 @@ def setup(self, format):
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="h"),
)
- self.df["object"] = tm.makeStringIndex(N)
+ self.df["object"] = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df.to_hdf(self.fname, "df", format=format)
# Numeric df
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py
index 8a2e3fa87eb37..bcbfcdea42dd9 100644
--- a/asv_bench/benchmarks/io/json.py
+++ b/asv_bench/benchmarks/io/json.py
@@ -4,6 +4,7 @@
from pandas import (
DataFrame,
+ Index,
concat,
date_range,
json_normalize,
@@ -11,10 +12,7 @@
timedelta_range,
)
-from ..pandas_vb_common import (
- BaseIO,
- tm,
-)
+from ..pandas_vb_common import BaseIO
class ReadJSON(BaseIO):
@@ -114,7 +112,7 @@ def setup(self, orient, frame):
ints = np.random.randint(100000000, size=N)
longints = sys.maxsize * np.random.randint(100000000, size=N)
floats = np.random.randn(N)
- strings = tm.makeStringIndex(N)
+ strings = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame(
@@ -220,7 +218,7 @@ def setup(self):
ints = np.random.randint(100000000, size=N)
longints = sys.maxsize * np.random.randint(100000000, size=N)
floats = np.random.randn(N)
- strings = tm.makeStringIndex(N)
+ strings = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame(
diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py
index 54631d9236887..4787b57b54756 100644
--- a/asv_bench/benchmarks/io/pickle.py
+++ b/asv_bench/benchmarks/io/pickle.py
@@ -2,14 +2,12 @@
from pandas import (
DataFrame,
+ Index,
date_range,
read_pickle,
)
-from ..pandas_vb_common import (
- BaseIO,
- tm,
-)
+from ..pandas_vb_common import BaseIO
class Pickle(BaseIO):
@@ -22,7 +20,7 @@ def setup(self):
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="h"),
)
- self.df["object"] = tm.makeStringIndex(N)
+ self.df["object"] = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df.to_pickle(self.fname)
def time_read_pickle(self):
diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py
index 6f893ee72d918..e87cc4aaa80c7 100644
--- a/asv_bench/benchmarks/io/sql.py
+++ b/asv_bench/benchmarks/io/sql.py
@@ -5,13 +5,12 @@
from pandas import (
DataFrame,
+ Index,
date_range,
read_sql_query,
read_sql_table,
)
-from ..pandas_vb_common import tm
-
class SQL:
params = ["sqlalchemy", "sqlite"]
@@ -35,7 +34,7 @@ def setup(self, connection):
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
- index=tm.makeStringIndex(N),
+ index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
@@ -84,7 +83,7 @@ def setup(self, connection, dtype):
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
- index=tm.makeStringIndex(N),
+ index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
@@ -113,7 +112,7 @@ def setup(self):
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
- index=tm.makeStringIndex(N),
+ index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
@@ -159,7 +158,7 @@ def setup(self, dtype):
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
- index=tm.makeStringIndex(N),
+ index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py
index 750bcf4ccee5c..ff33ededdfed9 100644
--- a/asv_bench/benchmarks/io/stata.py
+++ b/asv_bench/benchmarks/io/stata.py
@@ -2,14 +2,12 @@
from pandas import (
DataFrame,
+ Index,
date_range,
read_stata,
)
-from ..pandas_vb_common import (
- BaseIO,
- tm,
-)
+from ..pandas_vb_common import BaseIO
class Stata(BaseIO):
@@ -25,7 +23,7 @@ def setup(self, convert_dates):
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="h"),
)
- self.df["object"] = tm.makeStringIndex(self.N)
+ self.df["object"] = Index([f"i-{i}" for i in range(self.N)], dtype=object)
self.df["int8_"] = np.random.randint(
np.iinfo(np.int8).min, np.iinfo(np.int8).max - 27, N
)
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 23824c2c748df..6f494562103c2 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -14,8 +14,6 @@
merge_asof,
)
-from .pandas_vb_common import tm
-
try:
from pandas import merge_ordered
except ImportError:
@@ -28,7 +26,7 @@ class Concat:
def setup(self, axis):
N = 1000
- s = Series(N, index=tm.makeStringIndex(N))
+ s = Series(N, index=Index([f"i-{i}" for i in range(N)], dtype=object))
self.series = [s[i:-i] for i in range(1, 10)] * 50
self.small_frames = [DataFrame(np.random.randn(5, 4))] * 1000
df = DataFrame(
@@ -94,7 +92,7 @@ def setup(self, dtype, structure, axis, sort):
elif dtype in ("int64", "Int64", "int64[pyarrow]"):
vals = np.arange(N, dtype=np.int64)
elif dtype in ("string[python]", "string[pyarrow]"):
- vals = tm.makeStringIndex(N)
+ vals = Index([f"i-{i}" for i in range(N)], dtype=object)
else:
raise NotImplementedError
@@ -122,8 +120,8 @@ class Join:
param_names = ["sort"]
def setup(self, sort):
- level1 = tm.makeStringIndex(10).values
- level2 = tm.makeStringIndex(1000).values
+ level1 = Index([f"i-{i}" for i in range(10)], dtype=object).values
+ level2 = Index([f"i-{i}" for i in range(1000)], dtype=object).values
codes1 = np.arange(10).repeat(1000)
codes2 = np.tile(np.arange(1000), 10)
index2 = MultiIndex(levels=[level1, level2], codes=[codes1, codes2])
@@ -231,8 +229,8 @@ class Merge:
def setup(self, sort):
N = 10000
- indices = tm.makeStringIndex(N).values
- indices2 = tm.makeStringIndex(N).values
+ indices = Index([f"i-{i}" for i in range(N)], dtype=object).values
+ indices2 = Index([f"i-{i}" for i in range(N)], dtype=object).values
key = np.tile(indices[:8000], 10)
key2 = np.tile(indices2[:8000], 10)
self.left = DataFrame(
@@ -400,7 +398,7 @@ def time_merge_on_cat_idx(self):
class MergeOrdered:
def setup(self):
- groups = tm.makeStringIndex(10).values
+ groups = Index([f"i-{i}" for i in range(10)], dtype=object).values
self.left = DataFrame(
{
"group": groups.repeat(5000),
diff --git a/asv_bench/benchmarks/libs.py b/asv_bench/benchmarks/libs.py
index f041499c9c622..3419163bcfe09 100644
--- a/asv_bench/benchmarks/libs.py
+++ b/asv_bench/benchmarks/libs.py
@@ -15,13 +15,11 @@
from pandas import (
NA,
+ Index,
NaT,
)
-from .pandas_vb_common import (
- lib,
- tm,
-)
+from .pandas_vb_common import lib
try:
from pandas.util import cache_readonly
@@ -61,8 +59,8 @@ class FastZip:
def setup(self):
N = 10000
K = 10
- key1 = tm.makeStringIndex(N).values.repeat(K)
- key2 = tm.makeStringIndex(N).values.repeat(K)
+ key1 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
+ key2 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
col_array = np.vstack([key1, key2, np.random.randn(N * K)])
col_array2 = col_array.copy()
col_array2[:, :10000] = np.nan
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index 87dcdb16fa647..54788d41d83fe 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -5,6 +5,7 @@
from pandas import (
NA,
DataFrame,
+ Index,
MultiIndex,
RangeIndex,
Series,
@@ -12,8 +13,6 @@
date_range,
)
-from .pandas_vb_common import tm
-
class GetLoc:
def setup(self):
@@ -144,7 +143,11 @@ def time_is_monotonic(self):
class Duplicated:
def setup(self):
n, k = 200, 5000
- levels = [np.arange(n), tm.makeStringIndex(n).values, 1000 + np.arange(n)]
+ levels = [
+ np.arange(n),
+ Index([f"i-{i}" for i in range(n)], dtype=object).values,
+ 1000 + np.arange(n),
+ ]
codes = [np.random.choice(n, (k * n)) for lev in levels]
self.mi = MultiIndex(levels=levels, codes=codes)
@@ -249,7 +252,7 @@ def setup(self, index_structure, dtype, method, sort):
level2 = range(N // 1000)
int_left = MultiIndex.from_product([level1, level2])
- level2 = tm.makeStringIndex(N // 1000).values
+ level2 = Index([f"i-{i}" for i in range(N // 1000)], dtype=object).values
str_left = MultiIndex.from_product([level1, level2])
level2 = range(N // 1000)
@@ -293,7 +296,7 @@ def setup(self, dtype):
level2[0] = NA
ea_int_left = MultiIndex.from_product([level1, level2])
- level2 = tm.makeStringIndex(N // 1000).values
+ level2 = Index([f"i-{i}" for i in range(N // 1000)], dtype=object).values
str_left = MultiIndex.from_product([level1, level2])
data = {
@@ -354,7 +357,7 @@ def setup(self, dtype):
level2 = range(N // 1000)
int_midx = MultiIndex.from_product([level1, level2])
- level2 = tm.makeStringIndex(N // 1000).values
+ level2 = Index([f"i-{i}" for i in range(N // 1000)], dtype=object).values
str_midx = MultiIndex.from_product([level1, level2])
data = {
@@ -411,7 +414,7 @@ def setup(self, dtype):
elif dtype == "int64":
level2 = range(N2)
elif dtype == "string":
- level2 = tm.makeStringIndex(N2)
+ level2 = Index([f"i-{i}" for i in range(N2)], dtype=object)
else:
raise NotImplementedError
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 1c5e6050db275..3d22bfce7e2b2 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -9,8 +9,6 @@
period_range,
)
-from .pandas_vb_common import tm
-
class Reindex:
def setup(self):
@@ -23,8 +21,8 @@ def setup(self):
)
N = 5000
K = 200
- level1 = tm.makeStringIndex(N).values.repeat(K)
- level2 = np.tile(tm.makeStringIndex(K).values, N)
+ level1 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
+ level2 = np.tile(Index([f"i-{i}" for i in range(K)], dtype=object).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s = Series(np.random.randn(N * K), index=index)
self.s_subset = self.s[::2]
@@ -93,8 +91,8 @@ class DropDuplicates:
def setup(self, inplace):
N = 10000
K = 10
- key1 = tm.makeStringIndex(N).values.repeat(K)
- key2 = tm.makeStringIndex(N).values.repeat(K)
+ key1 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
+ key2 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
self.df = DataFrame(
{"key1": key1, "key2": key2, "value": np.random.randn(N * K)}
)
@@ -102,7 +100,9 @@ def setup(self, inplace):
self.df_nan.iloc[:10000, :] = np.nan
self.s = Series(np.random.randint(0, 1000, size=10000))
- self.s_str = Series(np.tile(tm.makeStringIndex(1000).values, 10))
+ self.s_str = Series(
+ np.tile(Index([f"i-{i}" for i in range(1000)], dtype=object).values, 10)
+ )
N = 1000000
K = 10000
@@ -133,7 +133,7 @@ class Align:
# blog "pandas escaped the zoo"
def setup(self):
n = 50000
- indices = tm.makeStringIndex(n)
+ indices = Index([f"i-{i}" for i in range(n)], dtype=object)
subsample_size = 40000
self.x = Series(np.random.randn(n), indices)
self.y = Series(
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 79cf8f9cd2048..b021af4694d7d 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -10,8 +10,6 @@
date_range,
)
-from .pandas_vb_common import tm
-
class SeriesConstructor:
def setup(self):
@@ -253,7 +251,7 @@ def time_mode(self, N):
class Dir:
def setup(self):
- self.s = Series(index=tm.makeStringIndex(10000))
+ self.s = Series(index=Index([f"i-{i}" for i in range(10000)], dtype=object))
def time_dir_strings(self):
dir(self.s)
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index 6682a60f42997..1f4a104255057 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -6,12 +6,11 @@
NA,
Categorical,
DataFrame,
+ Index,
Series,
)
from pandas.arrays import StringArray
-from .pandas_vb_common import tm
-
class Dtypes:
params = ["str", "string[python]", "string[pyarrow]"]
@@ -19,7 +18,9 @@ class Dtypes:
def setup(self, dtype):
try:
- self.s = Series(tm.makeStringIndex(10**5), dtype=dtype)
+ self.s = Series(
+ Index([f"i-{i}" for i in range(10000)], dtype=object), dtype=dtype
+ )
except ImportError:
raise NotImplementedError
@@ -172,7 +173,7 @@ class Repeat:
def setup(self, repeats):
N = 10**5
- self.s = Series(tm.makeStringIndex(N))
+ self.s = Series(Index([f"i-{i}" for i in range(N)], dtype=object))
repeat = {"int": 1, "array": np.random.randint(1, 3, N)}
self.values = repeat[repeats]
@@ -187,13 +188,20 @@ class Cat:
def setup(self, other_cols, sep, na_rep, na_frac):
N = 10**5
mask_gen = lambda: np.random.choice([True, False], N, p=[1 - na_frac, na_frac])
- self.s = Series(tm.makeStringIndex(N)).where(mask_gen())
+ self.s = Series(Index([f"i-{i}" for i in range(N)], dtype=object)).where(
+ mask_gen()
+ )
if other_cols == 0:
# str.cat self-concatenates only for others=None
self.others = None
else:
self.others = DataFrame(
- {i: tm.makeStringIndex(N).where(mask_gen()) for i in range(other_cols)}
+ {
+ i: Index([f"i-{i}" for i in range(N)], dtype=object).where(
+ mask_gen()
+ )
+ for i in range(other_cols)
+ }
)
def time_cat(self, other_cols, sep, na_rep, na_frac):
@@ -254,7 +262,7 @@ def time_get_dummies(self, dtype):
class Encode:
def setup(self):
- self.ser = Series(tm.makeStringIndex())
+ self.ser = Series(Index([f"i-{i}" for i in range(10_000)], dtype=object))
def time_encode_decode(self):
self.ser.str.encode("utf-8").str.decode("utf-8")
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index a74fb2bf48bc4..c73d869b6c39c 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -370,11 +370,6 @@ def getCols(k) -> str:
return string.ascii_uppercase[:k]
-# make index
-def makeStringIndex(k: int = 10, name=None) -> Index:
- return Index(rands_array(nchars=10, size=k), name=name)
-
-
def makeCategoricalIndex(
k: int = 10, n: int = 3, name=None, **kwargs
) -> CategoricalIndex:
@@ -385,14 +380,6 @@ def makeCategoricalIndex(
)
-def makeBoolIndex(k: int = 10, name=None) -> Index:
- if k == 1:
- return Index([True], name=name)
- elif k == 2:
- return Index([False, True], name=name)
- return Index([False, True] + [False] * (k - 2), name=name)
-
-
def makeNumericIndex(k: int = 10, *, name=None, dtype: Dtype | None) -> Index:
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
@@ -457,14 +444,13 @@ def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
def makeObjectSeries(name=None) -> Series:
- data = makeStringIndex(_N)
- data = Index(data, dtype=object)
- index = makeStringIndex(_N)
- return Series(data, index=index, name=name)
+ data = [f"foo_{i}" for i in range(_N)]
+ index = Index([f"bar_{i}" for i in range(_N)])
+ return Series(data, index=index, name=name, dtype=object)
def getSeriesData() -> dict[str, Series]:
- index = makeStringIndex(_N)
+ index = Index([f"foo_{i}" for i in range(_N)])
return {
c: Series(np.random.default_rng(i).standard_normal(_N), index=index)
for i, c in enumerate(getCols(_K))
@@ -566,7 +552,7 @@ def makeCustomIndex(
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
- "s": makeStringIndex,
+ "s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
@@ -1049,7 +1035,6 @@ def shares_memory(left, right) -> bool:
"iat",
"iloc",
"loc",
- "makeBoolIndex",
"makeCategoricalIndex",
"makeCustomDataframe",
"makeCustomIndex",
@@ -1062,7 +1047,6 @@ def shares_memory(left, right) -> bool:
"makeObjectSeries",
"makePeriodIndex",
"makeRangeIndex",
- "makeStringIndex",
"makeTimeDataFrame",
"makeTimedeltaIndex",
"makeTimeSeries",
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 350871c3085c1..3205b6657439f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -610,7 +610,7 @@ def _create_mi_with_dt64tz_level():
indices_dict = {
- "string": tm.makeStringIndex(100),
+ "string": Index([f"pandas_{i}" for i in range(100)]),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
@@ -626,7 +626,7 @@ def _create_mi_with_dt64tz_level():
"uint64": tm.makeUIntIndex(100, dtype="uint64"),
"float32": tm.makeFloatIndex(100, dtype="float32"),
"float64": tm.makeFloatIndex(100, dtype="float64"),
- "bool-object": tm.makeBoolIndex(10).astype(object),
+ "bool-object": Index([True, False] * 5, dtype=object),
"bool-dtype": Index(np.random.default_rng(2).standard_normal(10) < 0),
"complex64": tm.makeNumericIndex(100, dtype="float64").astype("complex64"),
"complex128": tm.makeNumericIndex(100, dtype="float64").astype("complex128"),
@@ -641,10 +641,12 @@ def _create_mi_with_dt64tz_level():
"nullable_uint": Index(np.arange(100), dtype="UInt16"),
"nullable_float": Index(np.arange(100), dtype="Float32"),
"nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"),
- "string-python": Index(pd.array(tm.makeStringIndex(100), dtype="string[python]")),
+ "string-python": Index(
+ pd.array([f"pandas_{i}" for i in range(100)], dtype="string[python]")
+ ),
}
if has_pyarrow:
- idx = Index(pd.array(tm.makeStringIndex(100), dtype="string[pyarrow]"))
+ idx = Index(pd.array([f"pandas_{i}" for i in range(100)], dtype="string[pyarrow]"))
indices_dict["string-pyarrow"] = idx
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index f89711c0edee7..c3c4a8b4fc6c0 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -916,7 +916,7 @@ def test_add_frames(self, first, second, expected):
# TODO: This came from series.test.test_operators, needs cleanup
def test_series_frame_radd_bug(self, fixed_now_ts):
# GH#353
- vals = Series(tm.makeStringIndex())
+ vals = Series([str(i) for i in range(5)])
result = "foo_" + vals
expected = vals.map(lambda x: "foo_" + x)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index 33953900c2006..6b36f447eb7d5 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -299,7 +299,7 @@ def test_iadd_string(self):
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="add doesn't work")
def test_add(self):
- index = tm.makeStringIndex(100)
+ index = pd.Index([str(i) for i in range(10)])
expected = pd.Index(index.values * 2)
tm.assert_index_equal(index + index, expected)
tm.assert_index_equal(index + index.tolist(), expected)
@@ -313,7 +313,7 @@ def test_add(self):
tm.assert_index_equal("1" + index, expected)
def test_sub_fail(self, using_infer_string):
- index = tm.makeStringIndex(100)
+ index = pd.Index([str(i) for i in range(10)])
if using_infer_string:
import pyarrow as pa
diff --git a/pandas/tests/frame/methods/test_first_valid_index.py b/pandas/tests/frame/methods/test_first_valid_index.py
index a448768f4173d..2e27f1aa71700 100644
--- a/pandas/tests/frame/methods/test_first_valid_index.py
+++ b/pandas/tests/frame/methods/test_first_valid_index.py
@@ -6,9 +6,10 @@
from pandas import (
DataFrame,
+ Index,
Series,
+ date_range,
)
-import pandas._testing as tm
class TestFirstValidIndex:
@@ -44,11 +45,12 @@ def test_first_last_valid_frame(self, data, idx, expected_first, expected_last):
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
- @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
- def test_first_last_valid(self, index_func):
- N = 30
- index = index_func(N)
- mat = np.random.default_rng(2).standard_normal(N)
+ @pytest.mark.parametrize(
+ "index",
+ [Index([str(i) for i in range(20)]), date_range("2020-01-01", periods=20)],
+ )
+ def test_first_last_valid(self, index):
+ mat = np.random.default_rng(2).standard_normal(len(index))
mat[:5] = np.nan
mat[-5:] = np.nan
@@ -60,10 +62,12 @@ def test_first_last_valid(self, index_func):
assert ser.first_valid_index() == frame.index[5]
assert ser.last_valid_index() == frame.index[-6]
- @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
- def test_first_last_valid_all_nan(self, index_func):
+ @pytest.mark.parametrize(
+ "index",
+ [Index([str(i) for i in range(10)]), date_range("2020-01-01", periods=10)],
+ )
+ def test_first_last_valid_all_nan(self, index):
# GH#17400: no valid entries
- index = index_func(30)
frame = DataFrame(np.nan, columns=["foo"], index=index)
assert frame.last_valid_index() is None
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index bf17b61b0e3f3..3111075c5c1a7 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -784,7 +784,7 @@ def test_constructor_dict_cast(self):
def test_constructor_dict_cast2(self):
# can't cast to float
test_data = {
- "A": dict(zip(range(20), tm.makeStringIndex(20))),
+ "A": dict(zip(range(20), [f"word_{i}" for i in range(20)])),
"B": dict(zip(range(15), np.random.default_rng(2).standard_normal(15))),
}
with pytest.raises(ValueError, match="could not convert string"):
diff --git a/pandas/tests/frame/test_repr.py b/pandas/tests/frame/test_repr.py
index 98962b3003b6d..eed48b9db116b 100644
--- a/pandas/tests/frame/test_repr.py
+++ b/pandas/tests/frame/test_repr.py
@@ -165,7 +165,7 @@ def test_repr_mixed_big(self):
biggie = DataFrame(
{
"A": np.random.default_rng(2).standard_normal(200),
- "B": tm.makeStringIndex(200),
+ "B": [str(i) for i in range(200)],
},
index=range(200),
)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 3c1a35c984031..c401762dace23 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -19,6 +19,7 @@
Series,
Timestamp,
date_range,
+ period_range,
)
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouping
@@ -174,23 +175,21 @@ class TestGrouping:
@pytest.mark.parametrize(
"index",
[
- tm.makeFloatIndex,
- tm.makeStringIndex,
- tm.makeIntIndex,
- tm.makeDateIndex,
- tm.makePeriodIndex,
+ Index(list("abcde")),
+ Index(np.arange(5)),
+ Index(np.arange(5, dtype=float)),
+ date_range("2020-01-01", periods=5),
+ period_range("2020-01-01", periods=5),
],
)
- @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_grouper_index_types(self, index):
# related GH5375
# groupby misbehaving when using a Floatlike index
- df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"))
+ df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"), index=index)
- df.index = index(len(df))
df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
- df.index = list(reversed(df.index.tolist()))
+ df.index = df.index[::-1]
df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index a69248cf038f8..6c6d9022b1af3 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -257,7 +257,7 @@ def test_duplicated(idx_dup, keep, expected):
def test_duplicated_hashtable_impl(keep, monkeypatch):
# GH 9125
n, k = 6, 10
- levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
+ levels = [np.arange(n), [str(i) for i in range(n)], 1000 + np.arange(n)]
codes = [np.random.default_rng(2).choice(n, k * n) for _ in levels]
with monkeypatch.context() as m:
m.setattr(libindex, "_SIZE_CUTOFF", 50)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index c9fbf95751dfe..cf9966145afce 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -6,6 +6,9 @@
Index,
RangeIndex,
Series,
+ date_range,
+ period_range,
+ timedelta_range,
)
import pandas._testing as tm
@@ -39,22 +42,21 @@ def check(self, result, original, indexer, getitem):
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
- "index_func",
+ "index",
[
- tm.makeStringIndex,
- tm.makeCategoricalIndex,
- tm.makeDateIndex,
- tm.makeTimedeltaIndex,
- tm.makePeriodIndex,
+ Index(list("abcde")),
+ Index(list("abcde"), dtype="category"),
+ date_range("2020-01-01", periods=5),
+ timedelta_range("1 day", periods=5),
+ period_range("2020-01-01", periods=5),
],
)
- def test_scalar_non_numeric(self, index_func, frame_or_series, indexer_sl):
+ def test_scalar_non_numeric(self, index, frame_or_series, indexer_sl):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
- i = index_func(5)
- s = gen_obj(frame_or_series, i)
+ s = gen_obj(frame_or_series, index)
# getting
with pytest.raises(KeyError, match="^3.0$"):
@@ -75,19 +77,18 @@ def test_scalar_non_numeric(self, index_func, frame_or_series, indexer_sl):
assert 3.0 not in s2.axes[-1]
@pytest.mark.parametrize(
- "index_func",
+ "index",
[
- tm.makeStringIndex,
- tm.makeCategoricalIndex,
- tm.makeDateIndex,
- tm.makeTimedeltaIndex,
- tm.makePeriodIndex,
+ Index(list("abcde")),
+ Index(list("abcde"), dtype="category"),
+ date_range("2020-01-01", periods=5),
+ timedelta_range("1 day", periods=5),
+ period_range("2020-01-01", periods=5),
],
)
- def test_scalar_non_numeric_series_fallback(self, index_func):
+ def test_scalar_non_numeric_series_fallback(self, index):
# fallsback to position selection, series only
- i = index_func(5)
- s = Series(np.arange(len(i)), index=i)
+ s = Series(np.arange(len(index)), index=index)
msg = "Series.__getitem__ treating keys as positions is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
@@ -214,21 +215,20 @@ def test_scalar_float(self, frame_or_series):
self.check(result, s, 3, False)
@pytest.mark.parametrize(
- "index_func",
+ "index",
[
- tm.makeStringIndex,
- tm.makeDateIndex,
- tm.makeTimedeltaIndex,
- tm.makePeriodIndex,
+ Index(list("abcde"), dtype=object),
+ date_range("2020-01-01", periods=5),
+ timedelta_range("1 day", periods=5),
+ period_range("2020-01-01", periods=5),
],
)
@pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)])
- def test_slice_non_numeric(self, index_func, idx, frame_or_series, indexer_sli):
+ def test_slice_non_numeric(self, index, idx, frame_or_series, indexer_sli):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
- index = index_func(5)
s = gen_obj(frame_or_series, index)
# getitem
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index edac82193d1c8..f5738b83a8e64 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -25,7 +25,6 @@
read_csv,
reset_option,
)
-import pandas._testing as tm
from pandas.io.formats import printing
import pandas.io.formats.format as fmt
@@ -834,19 +833,21 @@ def test_to_string_buffer_all_unicode(self):
buf.getvalue()
@pytest.mark.parametrize(
- "index",
+ "index_scalar",
[
- tm.makeStringIndex,
- tm.makeIntIndex,
- tm.makeDateIndex,
- tm.makePeriodIndex,
+ "a" * 10,
+ 1,
+ Timestamp(2020, 1, 1),
+ pd.Period("2020-01-01"),
],
)
@pytest.mark.parametrize("h", [10, 20])
@pytest.mark.parametrize("w", [10, 20])
- def test_to_string_truncate_indices(self, index, h, w):
+ def test_to_string_truncate_indices(self, index_scalar, h, w):
with option_context("display.expand_frame_repr", False):
- df = DataFrame(index=index(h), columns=tm.makeStringIndex(w))
+ df = DataFrame(
+ index=[index_scalar] * h, columns=[str(i) * 10 for i in range(w)]
+ )
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index a7648cf1c471a..605e5e182d8cc 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -59,7 +59,7 @@ def biggie_df_fixture(request):
df = DataFrame(
{
"A": np.random.default_rng(2).standard_normal(200),
- "B": tm.makeStringIndex(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
},
index=np.arange(200),
)
diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py
index e607b6eb454a1..02c20b0e25477 100644
--- a/pandas/tests/io/formats/test_to_string.py
+++ b/pandas/tests/io/formats/test_to_string.py
@@ -804,7 +804,7 @@ def test_to_string(self):
biggie = DataFrame(
{
"A": np.random.default_rng(2).standard_normal(200),
- "B": tm.makeStringIndex(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
},
)
diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py
index c109b72e9c239..59a05dc9ea546 100644
--- a/pandas/tests/io/pytables/test_put.py
+++ b/pandas/tests/io/pytables/test_put.py
@@ -196,32 +196,27 @@ def test_put_mixed_type(setup_path):
tm.assert_frame_equal(expected, df)
+@pytest.mark.parametrize("format", ["table", "fixed"])
@pytest.mark.parametrize(
- "format, index",
+ "index",
[
- ["table", tm.makeFloatIndex],
- ["table", tm.makeStringIndex],
- ["table", tm.makeIntIndex],
- ["table", tm.makeDateIndex],
- ["fixed", tm.makeFloatIndex],
- ["fixed", tm.makeStringIndex],
- ["fixed", tm.makeIntIndex],
- ["fixed", tm.makeDateIndex],
- ["table", tm.makePeriodIndex], # GH#7796
- ["fixed", tm.makePeriodIndex],
+ Index([str(i) for i in range(10)]),
+ Index(np.arange(10, dtype=float)),
+ Index(np.arange(10)),
+ date_range("2020-01-01", periods=10),
+ pd.period_range("2020-01-01", periods=10),
],
)
-@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_store_index_types(setup_path, format, index):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
df = DataFrame(
- np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
+ np.random.default_rng(2).standard_normal((10, 2)),
+ columns=list("AB"),
+ index=index,
)
- df.index = index(len(df))
-
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 6c24843f18d0d..d06935871cb56 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -51,7 +51,8 @@ def roundtrip(key, obj, **kwargs):
def test_long_strings(setup_path):
# GH6166
- df = DataFrame({"a": tm.makeStringIndex(10)}, index=tm.makeStringIndex(10))
+ data = ["a" * 50] * 10
+ df = DataFrame({"a": data}, index=data)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
@@ -65,7 +66,7 @@ def test_api(tmp_path, setup_path):
# API issue when to_hdf doesn't accept append AND format args
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(range(20))
df.iloc[:10].to_hdf(path, key="df", append=True, format="table")
df.iloc[10:].to_hdf(path, key="df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
@@ -79,7 +80,7 @@ def test_api(tmp_path, setup_path):
def test_api_append(tmp_path, setup_path):
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(range(20))
df.iloc[:10].to_hdf(path, key="df", append=True)
df.iloc[10:].to_hdf(path, key="df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
@@ -93,7 +94,7 @@ def test_api_append(tmp_path, setup_path):
def test_api_2(tmp_path, setup_path):
path = tmp_path / setup_path
- df = tm.makeDataFrame()
+ df = DataFrame(range(20))
df.to_hdf(path, key="df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
@@ -107,7 +108,7 @@ def test_api_2(tmp_path, setup_path):
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
- df = tm.makeDataFrame()
+ df = DataFrame(range(20))
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 96c160ab40bd8..df8a1e3cb7470 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -956,10 +956,6 @@ def test_to_hdf_with_object_column_names(tmp_path, setup_path):
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]
- types_should_run = [
- tm.makeStringIndex,
- tm.makeCategoricalIndex,
- ]
for index in types_should_fail:
df = DataFrame(
@@ -970,14 +966,18 @@ def test_to_hdf_with_object_column_names(tmp_path, setup_path):
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, key="df", format="table", data_columns=True)
- for index in types_should_run:
- df = DataFrame(
- np.random.default_rng(2).standard_normal((10, 2)), columns=index(2)
- )
- path = tmp_path / setup_path
- df.to_hdf(path, key="df", format="table", data_columns=True)
- result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
- assert len(result)
+
+@pytest.mark.parametrize("dtype", [None, "category"])
+def test_to_hdf_with_object_column_names_should_run(tmp_path, setup_path, dtype):
+ # GH9057
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 2)),
+ columns=Index(["a", "b"], dtype=dtype),
+ )
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+ result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
+ assert len(result)
def test_hdfstore_strides(setup_path):
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 9c4ae92224148..303f8550c5a80 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -33,13 +33,13 @@
def get_objs():
indexes = [
- tm.makeBoolIndex(10, name="a"),
+ Index([True, False] * 5, name="a"),
tm.makeIntIndex(10, name="a"),
tm.makeFloatIndex(10, name="a"),
tm.makeDateIndex(10, name="a"),
tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
tm.makePeriodIndex(10, name="a"),
- tm.makeStringIndex(10, name="a"),
+ Index([str(i) for i in range(10)], name="a"),
]
arr = np.random.default_rng(2).standard_normal(10)
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 1dc25cb9d4c1e..3d9098917a12d 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -87,19 +87,17 @@ def f(df):
@pytest.mark.parametrize(
- "func",
+ "index",
[
- tm.makeIntIndex,
- tm.makeStringIndex,
- tm.makeFloatIndex,
- (lambda m: tm.makeCustomIndex(m, 2)),
+ Index([1, 2]),
+ Index(["a", "b"]),
+ Index([1.1, 2.2]),
+ pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]]),
],
)
-def test_fails_on_no_datetime_index(func):
- n = 2
- index = func(n)
+def test_fails_on_no_datetime_index(index):
name = type(index).__name__
- df = DataFrame({"a": np.random.default_rng(2).standard_normal(n)}, index=index)
+ df = DataFrame({"a": range(len(index))}, index=index)
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex "
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index f5d78fbd44812..269d3a2b7078e 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -188,7 +188,7 @@ def test_merge_multiple_cols_with_mixed_cols_index(self):
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
- key1 = tm.makeStringIndex(10000)
+ key1 = [str(i) for i in range(10000)]
key1 = np.tile(key1, 2)
key2 = key1[::-1]
diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py
index 795b2eab82aca..1cbb7c7982802 100644
--- a/pandas/tests/series/methods/test_combine_first.py
+++ b/pandas/tests/series/methods/test_combine_first.py
@@ -51,9 +51,9 @@ def test_combine_first(self):
tm.assert_series_equal(combined[1::2], series_copy[1::2])
# mixed types
- index = tm.makeStringIndex(20)
+ index = pd.Index([str(i) for i in range(20)])
floats = Series(np.random.default_rng(2).standard_normal(20), index=index)
- strings = Series(tm.makeStringIndex(10), index=index[::2], dtype=object)
+ strings = Series([str(i) for i in range(10)], index=index[::2], dtype=object)
combined = strings.combine_first(floats)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index a39b3ff7e6f2b..c29fe6ba06ab4 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -68,7 +68,7 @@ def test_tab_completion_with_categorical(self):
@pytest.mark.parametrize(
"index",
[
- tm.makeStringIndex(10),
+ Index([str(i) for i in range(10)]),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index a6e63dfd5f409..24c4706810154 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1663,19 +1663,18 @@ def test_unique_complex_numbers(self, array, expected):
class TestHashTable:
@pytest.mark.parametrize(
- "htable, tm_dtype",
+ "htable, data",
[
- (ht.PyObjectHashTable, "String"),
- (ht.StringHashTable, "String"),
- (ht.Float64HashTable, "Float"),
- (ht.Int64HashTable, "Int"),
- (ht.UInt64HashTable, "UInt"),
+ (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]),
+ (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]),
+ (ht.Float64HashTable, np.arange(1000, dtype=np.float64)),
+ (ht.Int64HashTable, np.arange(1000, dtype=np.int64)),
+ (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)),
],
)
- def test_hashtable_unique(self, htable, tm_dtype, writable):
+ def test_hashtable_unique(self, htable, data, writable):
# output of maker has guaranteed unique elements
- maker = getattr(tm, "make" + tm_dtype + "Index")
- s = Series(maker(1000))
+ s = Series(data)
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
@@ -1703,19 +1702,18 @@ def test_hashtable_unique(self, htable, tm_dtype, writable):
tm.assert_numpy_array_equal(reconstr, s_duplicated.values)
@pytest.mark.parametrize(
- "htable, tm_dtype",
+ "htable, data",
[
- (ht.PyObjectHashTable, "String"),
- (ht.StringHashTable, "String"),
- (ht.Float64HashTable, "Float"),
- (ht.Int64HashTable, "Int"),
- (ht.UInt64HashTable, "UInt"),
+ (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]),
+ (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]),
+ (ht.Float64HashTable, np.arange(1000, dtype=np.float64)),
+ (ht.Int64HashTable, np.arange(1000, dtype=np.int64)),
+ (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)),
],
)
- def test_hashtable_factorize(self, htable, tm_dtype, writable):
+ def test_hashtable_factorize(self, htable, writable, data):
# output of maker has guaranteed unique elements
- maker = getattr(tm, "make" + tm_dtype + "Index")
- s = Series(maker(1000))
+ s = Series(data)
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index 75528a8b99c4d..5d22896d9d055 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -401,7 +401,7 @@ def test_invalid_index_types_unicode():
msg = "Unknown datetime string format"
with pytest.raises(ValueError, match=msg):
- frequencies.infer_freq(tm.makeStringIndex(10))
+ frequencies.infer_freq(Index(["ZqgszYBfuL"]))
def test_string_datetime_like_compat():
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 00dc184a0ac4d..e7c4c27714d5f 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -328,9 +328,9 @@ def test_alternate_encoding(index):
@pytest.mark.parametrize("l_add", [0, 1])
def test_same_len_hash_collisions(l_exp, l_add):
length = 2 ** (l_exp + 8) + l_add
- s = tm.makeStringIndex(length).to_numpy()
+ idx = np.array([str(i) for i in range(length)], dtype=object)
- result = hash_array(s, "utf8")
+ result = hash_array(idx, "utf8")
assert not result[0] == result[1]
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56155 | 2023-11-24T20:48:49Z | 2023-11-27T02:59:21Z | 2023-11-27T02:59:21Z | 2023-11-27T02:59:24Z |
TST/CLN: make equalContents more strict | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 832919db442d4..51de242522074 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -302,13 +302,6 @@ def reset_display_options() -> None:
# Comparators
-def equalContents(arr1, arr2) -> bool:
- """
- Checks if the set of unique elements of arr1 and arr2 are equivalent.
- """
- return frozenset(arr1) == frozenset(arr2)
-
-
def box_expected(expected, box_cls, transpose: bool = True):
"""
Helper function to wrap the expected output of a test in a given box_class.
@@ -1131,7 +1124,6 @@ def shares_memory(left, right) -> bool:
"EMPTY_STRING_PATTERN",
"ENDIAN",
"ensure_clean",
- "equalContents",
"external_error_raised",
"FLOAT_EA_DTYPES",
"FLOAT_NUMPY_DTYPES",
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index b2d94ff5ffbd1..135a86cad1395 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -48,7 +48,7 @@ def test_getitem(self, float_frame):
# Column access
for _, series in sl.items():
assert len(series.index) == 20
- assert tm.equalContents(series.index, sl.index)
+ tm.assert_index_equal(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 1779f703dd2b7..0335279b3a123 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -37,7 +37,7 @@ def test_combine_first(self, float_frame):
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
- assert tm.equalContents(combined.columns, float_frame.columns)
+ tm.assert_index_equal(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index b57b4f4422888..b6a6334b89fc1 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -624,7 +624,7 @@ def test_reindex(self, float_frame, using_copy_on_write):
assert np.isnan(val)
for col, series in newFrame.items():
- assert tm.equalContents(series.index, newFrame.index)
+ tm.assert_index_equal(series.index, newFrame.index)
emptyFrame = float_frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
@@ -642,7 +642,7 @@ def test_reindex(self, float_frame, using_copy_on_write):
assert np.isnan(val)
for col, series in nonContigFrame.items():
- assert tm.equalContents(series.index, nonContigFrame.index)
+ tm.assert_index_equal(series.index, nonContigFrame.index)
# corner cases
diff --git a/pandas/tests/frame/test_iteration.py b/pandas/tests/frame/test_iteration.py
index 7374a8ea6aa77..a1c23ff05f3e1 100644
--- a/pandas/tests/frame/test_iteration.py
+++ b/pandas/tests/frame/test_iteration.py
@@ -40,7 +40,7 @@ def test_items_names(self, float_string_frame):
assert v.name == k
def test_iter(self, float_frame):
- assert tm.equalContents(list(float_frame), float_frame.columns)
+ assert list(float_frame) == list(float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py
index 488f79eea0d11..e538ad512d691 100644
--- a/pandas/tests/indexes/base_class/test_setops.py
+++ b/pandas/tests/indexes/base_class/test_setops.py
@@ -12,6 +12,13 @@
from pandas.core.algorithms import safe_sort
+def equal_contents(arr1, arr2) -> bool:
+ """
+ Checks if the set of unique elements of arr1 and arr2 are equivalent.
+ """
+ return frozenset(arr1) == frozenset(arr2)
+
+
class TestIndexSetOps:
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
@@ -71,7 +78,7 @@ def test_union_different_type_base(self, klass):
result = first.union(klass(second.values))
- assert tm.equalContents(result, index)
+ assert equal_contents(result, index)
def test_union_sort_other_incomparable(self):
# https://github.com/pandas-dev/pandas/issues/24959
@@ -119,7 +126,7 @@ def test_intersection_different_type_base(self, klass, sort):
second = index[:3]
result = first.intersection(klass(second.values), sort=sort)
- assert tm.equalContents(result, second)
+ assert equal_contents(result, second)
def test_intersection_nosort(self):
result = Index(["c", "b", "a"]).intersection(["b", "a"])
@@ -244,7 +251,7 @@ def test_union_name_preservation(
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
- tm.equalContents(union, expected)
+ tm.assert_index_equal(union.sort_values(), expected.sort_values())
@pytest.mark.parametrize(
"diff_type, expected",
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index dde680665a8bc..78c23e47897cf 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -206,13 +206,13 @@ def test_intersection2(self):
first = tm.makeDateIndex(10)
second = first[5:]
intersect = first.intersection(second)
- assert tm.equalContents(intersect, second)
+ tm.assert_index_equal(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
- assert tm.equalContents(result, second)
+ tm.assert_index_equal(result, second)
third = Index(["a", "b", "c"])
result = first.intersection(third)
diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py
index 059b0b75f4190..1b0816a9405cb 100644
--- a/pandas/tests/indexes/interval/test_setops.py
+++ b/pandas/tests/indexes/interval/test_setops.py
@@ -25,14 +25,16 @@ def test_union(self, closed, sort):
expected = monotonic_index(0, 13, closed=closed)
result = index[::-1].union(other, sort=sort)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
result = other[::-1].union(index, sort=sort)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
tm.assert_index_equal(index.union(index, sort=sort), index)
tm.assert_index_equal(index.union(index[:1], sort=sort), index)
@@ -65,14 +67,16 @@ def test_intersection(self, closed, sort):
expected = monotonic_index(5, 11, closed=closed)
result = index[::-1].intersection(other, sort=sort)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
result = other[::-1].intersection(index, sort=sort)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
tm.assert_index_equal(index.intersection(index, sort=sort), index)
@@ -148,16 +152,18 @@ def test_symmetric_difference(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
expected = empty_index(dtype="int64", closed=closed)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 2b4107acee096..be266f5d8fdce 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -243,10 +243,10 @@ def test_union(idx, sort):
the_union = piece1.union(piece2, sort=sort)
- if sort is None:
- tm.assert_index_equal(the_union, idx.sort_values())
-
- assert tm.equalContents(the_union, idx)
+ if sort in (None, False):
+ tm.assert_index_equal(the_union.sort_values(), idx.sort_values())
+ else:
+ tm.assert_index_equal(the_union, idx)
# corner case, pass self or empty thing:
the_union = idx.union(idx, sort=sort)
@@ -258,7 +258,7 @@ def test_union(idx, sort):
tuples = idx.values
result = idx[:4].union(tuples[4:], sort=sort)
if sort is None:
- tm.equalContents(result, idx)
+ tm.assert_index_equal(result.sort_values(), idx.sort_values())
else:
assert result.equals(idx)
@@ -284,9 +284,10 @@ def test_intersection(idx, sort):
the_int = piece1.intersection(piece2, sort=sort)
- if sort is None:
+ if sort in (None, True):
tm.assert_index_equal(the_int, idx[3:5])
- assert tm.equalContents(the_int, idx[3:5])
+ else:
+ tm.assert_index_equal(the_int.sort_values(), idx[3:5])
# corner case, pass self
the_int = idx.intersection(idx, sort=sort)
diff --git a/pandas/tests/indexes/numeric/test_setops.py b/pandas/tests/indexes/numeric/test_setops.py
index d3789f2477896..376b51dd98bb1 100644
--- a/pandas/tests/indexes/numeric/test_setops.py
+++ b/pandas/tests/indexes/numeric/test_setops.py
@@ -133,7 +133,10 @@ def test_symmetric_difference(self, sort):
index2 = Index([2, 3, 4, 1])
result = index1.symmetric_difference(index2, sort=sort)
expected = Index([5, 1])
- assert tm.equalContents(result, expected)
+ if sort is not None:
+ tm.assert_index_equal(result, expected)
+ else:
+ tm.assert_index_equal(result, expected.sort_values())
assert result.name is None
if sort is None:
expected = expected.sort_values()
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index b9a5940795a5b..2fa7e8cd0d2df 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -142,9 +142,10 @@ def test_union_misc(self, sort):
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]), sort=sort)
- if sort is None:
+ if sort is False:
+ tm.assert_index_equal(result.sort_values(), index)
+ else:
tm.assert_index_equal(result, index)
- assert tm.equalContents(result, index)
# cast if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
@@ -163,9 +164,10 @@ def test_intersection(self, sort):
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right, sort=sort)
- if sort is None:
+ if sort is False:
+ tm.assert_index_equal(result.sort_values(), index[10:-5])
+ else:
tm.assert_index_equal(result, index[10:-5])
- assert tm.equalContents(result, index[10:-5])
# cast if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index dc624f0271a73..5360f1c6ea6d9 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -492,10 +492,10 @@ def test_union_dt_as_obj(self, simple_index):
first_cat = index.union(date_index)
second_cat = index.union(index)
- appended = np.append(index, date_index.astype("O"))
+ appended = Index(np.append(index, date_index.astype("O")))
- assert tm.equalContents(first_cat, appended)
- assert tm.equalContents(second_cat, index)
+ tm.assert_index_equal(first_cat, appended)
+ tm.assert_index_equal(second_cat, index)
tm.assert_contains_all(index, first_cat)
tm.assert_contains_all(index, second_cat)
tm.assert_contains_all(date_index, first_cat)
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 1f328c06b483b..dab2475240267 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -30,6 +30,13 @@
)
+def equal_contents(arr1, arr2) -> bool:
+ """
+ Checks if the set of unique elements of arr1 and arr2 are equivalent.
+ """
+ return frozenset(arr1) == frozenset(arr2)
+
+
@pytest.fixture(
params=tm.ALL_REAL_NUMPY_DTYPES
+ [
@@ -215,10 +222,10 @@ def test_intersection_base(self, index):
if isinstance(index, CategoricalIndex):
pytest.skip(f"Not relevant for {type(index).__name__}")
- first = index[:5]
- second = index[:3]
+ first = index[:5].unique()
+ second = index[:3].unique()
intersect = first.intersection(second)
- assert tm.equalContents(intersect, second)
+ tm.assert_index_equal(intersect, second)
if isinstance(index.dtype, DatetimeTZDtype):
# The second.values below will drop tz, so the rest of this test
@@ -229,7 +236,7 @@ def test_intersection_base(self, index):
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.intersection(case)
- assert tm.equalContents(result, second)
+ assert equal_contents(result, second)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -241,12 +248,13 @@ def test_intersection_base(self, index):
)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_union_base(self, index):
+ index = index.unique()
first = index[3:]
second = index[:5]
everything = index
union = first.union(second)
- assert tm.equalContents(union, everything)
+ tm.assert_index_equal(union.sort_values(), everything.sort_values())
if isinstance(index.dtype, DatetimeTZDtype):
# The second.values below will drop tz, so the rest of this test
@@ -257,7 +265,7 @@ def test_union_base(self, index):
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.union(case)
- assert tm.equalContents(result, everything)
+ assert equal_contents(result, everything)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -280,13 +288,13 @@ def test_difference_base(self, sort, index):
else:
answer = index[4:]
result = first.difference(second, sort)
- assert tm.equalContents(result, answer)
+ assert equal_contents(result, answer)
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.difference(case, sort)
- assert tm.equalContents(result, answer)
+ assert equal_contents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -311,13 +319,13 @@ def test_symmetric_difference(self, index):
second = index[:-1]
answer = index[[0, -1]]
result = first.symmetric_difference(second)
- assert tm.equalContents(result, answer)
+ tm.assert_index_equal(result.sort_values(), answer.sort_values())
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.symmetric_difference(case)
- assert tm.equalContents(result, answer)
+ assert equal_contents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
@@ -701,9 +709,10 @@ def test_intersection(self, index, sort):
first = index[:20]
second = index[:10]
intersect = first.intersection(second, sort=sort)
- if sort is None:
- tm.assert_index_equal(intersect, second.sort_values())
- assert tm.equalContents(intersect, second)
+ if sort in (None, False):
+ tm.assert_index_equal(intersect.sort_values(), second.sort_values())
+ else:
+ tm.assert_index_equal(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
@@ -766,9 +775,10 @@ def test_union(self, index, sort):
everything = index[:20]
union = first.union(second, sort=sort)
- if sort is None:
- tm.assert_index_equal(union, everything.sort_values())
- assert tm.equalContents(union, everything)
+ if sort in (None, False):
+ tm.assert_index_equal(union.sort_values(), everything.sort_values())
+ else:
+ tm.assert_index_equal(union, everything)
@pytest.mark.parametrize("klass", [np.array, Series, list])
@pytest.mark.parametrize("index", ["string"], indirect=True)
@@ -780,9 +790,10 @@ def test_union_from_iterables(self, index, klass, sort):
case = klass(second.values)
result = first.union(case, sort=sort)
- if sort is None:
- tm.assert_index_equal(result, everything.sort_values())
- assert tm.equalContents(result, everything)
+ if sort in (None, False):
+ tm.assert_index_equal(result.sort_values(), everything.sort_values())
+ else:
+ tm.assert_index_equal(result, everything)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_identity(self, index, sort):
@@ -811,7 +822,11 @@ def test_difference_name_preservation(self, index, second_name, expected, sort):
second.name = second_name
result = first.difference(second, sort=sort)
- assert tm.equalContents(result, answer)
+ if sort is True:
+ tm.assert_index_equal(result, answer)
+ else:
+ answer.name = second_name
+ tm.assert_index_equal(result.sort_values(), answer.sort_values())
if expected is None:
assert result.name is None
@@ -894,7 +909,6 @@ def test_symmetric_difference_mi(self, sort):
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
- assert tm.equalContents(result, expected)
@pytest.mark.parametrize(
"index2,expected",
@@ -916,13 +930,20 @@ def test_symmetric_difference_missing(self, index2, expected, sort):
def test_symmetric_difference_non_index(self, sort):
index1 = Index([1, 2, 3, 4], name="index1")
index2 = np.array([2, 3, 4, 5])
- expected = Index([1, 5])
+ expected = Index([1, 5], name="index1")
result = index1.symmetric_difference(index2, sort=sort)
- assert tm.equalContents(result, expected)
+ if sort in (None, True):
+ tm.assert_index_equal(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
assert result.name == "index1"
result = index1.symmetric_difference(index2, result_name="new_name", sort=sort)
- assert tm.equalContents(result, expected)
+ expected.name = "new_name"
+ if sort in (None, True):
+ tm.assert_index_equal(result, expected)
+ else:
+ tm.assert_index_equal(result.sort_values(), expected)
assert result.name == "new_name"
def test_union_ea_dtypes(self, any_numeric_ea_and_arrow_dtype):
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index 727b4eee00566..fce10d9176d74 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -115,7 +115,7 @@ def test_intersection_equal(self, sort):
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
- assert tm.equalContents(intersect, second)
+ tm.assert_index_equal(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 144210166d1a6..45d0a839b9e1a 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -375,7 +375,9 @@ def check_iris_frame(frame: DataFrame):
pytype = frame.dtypes.iloc[0].type
row = frame.iloc[0]
assert issubclass(pytype, np.floating)
- tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
+ tm.assert_series_equal(
+ row, Series([5.1, 3.5, 1.4, 0.2, "Iris-setosa"], index=frame.columns, name=0)
+ )
assert frame.shape in ((150, 5), (8, 5))
@@ -1734,7 +1736,7 @@ def test_api_execute_sql(conn, request):
iris_results = pandas_sql.execute("SELECT * FROM iris")
row = iris_results.fetchone()
iris_results.close()
- tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
+ assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]
@pytest.mark.parametrize("conn", all_connectable_types)
@@ -2710,7 +2712,7 @@ def test_execute_sql(conn, request):
iris_results = pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
iris_results.close()
- tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
+ assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]
@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris)
@@ -2726,7 +2728,7 @@ def test_sqlalchemy_read_table_columns(conn, request):
iris_frame = sql.read_sql_table(
"iris", con=conn, columns=["SepalLength", "SepalLength"]
)
- tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
+ tm.assert_index_equal(iris_frame.columns, Index(["SepalLength", "SepalLength__1"]))
@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index b108ec24732ac..0c788b371a03a 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -252,7 +252,7 @@ def test_slice(string_series, object_series, using_copy_on_write, warn_copy_on_w
assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == string_series.index[11]
- assert tm.equalContents(numSliceEnd, np.array(string_series)[-10:])
+ tm.assert_numpy_array_equal(np.array(numSliceEnd), np.array(string_series)[-10:])
# Test return view.
sl = string_series[10:20]
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index e7233f005e427..6471cd71f0860 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -662,9 +662,9 @@ def test_comparison_operators_with_nas(self, comparison_op):
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
- expected = [True, True, False, True, True]
- assert tm.equalContents(ts.index != 5, expected)
- assert tm.equalContents(~(ts.index == 5), expected)
+ expected = np.array([True, True, False, True, True])
+ tm.assert_numpy_array_equal(ts.index != 5, expected)
+ tm.assert_numpy_array_equal(~(ts.index == 5), expected)
@pytest.mark.parametrize(
"left, right",
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 65726eb8fcbb8..84c612c43da29 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -160,7 +160,7 @@ def test_constructor(self, datetime_series, using_infer_string):
derived = Series(datetime_series)
assert derived.index._is_all_dates
- assert tm.equalContents(derived.index, datetime_series.index)
+ tm.assert_index_equal(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56154 | 2023-11-24T19:13:07Z | 2023-11-24T22:56:12Z | 2023-11-24T22:56:12Z | 2023-11-25T01:22:12Z |
CLN: Remove reset_display_options | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 832919db442d4..788272f9583aa 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -291,13 +291,6 @@
comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"]
-def reset_display_options() -> None:
- """
- Reset the display options for printing and representing objects.
- """
- pd.reset_option("^display.", silent=True)
-
-
# -----------------------------------------------------------------------------
# Comparators
@@ -1182,7 +1175,6 @@ def shares_memory(left, right) -> bool:
"NULL_OBJECTS",
"OBJECT_DTYPES",
"raise_assert_detail",
- "reset_display_options",
"raises_chained_assignment_error",
"round_trip_localpath",
"round_trip_pathlib",
diff --git a/pandas/tests/frame/test_repr.py b/pandas/tests/frame/test_repr.py
index f750074a36e91..98962b3003b6d 100644
--- a/pandas/tests/frame/test_repr.py
+++ b/pandas/tests/frame/test_repr.py
@@ -24,8 +24,6 @@
)
import pandas._testing as tm
-import pandas.io.formats.format as fmt
-
class TestDataFrameRepr:
def test_repr_should_return_str(self):
@@ -220,16 +218,14 @@ def test_repr_unsortable(self):
def test_repr_float_frame_options(self, float_frame):
repr(float_frame)
- fmt.set_option("display.precision", 3)
- repr(float_frame)
+ with option_context("display.precision", 3):
+ repr(float_frame)
- fmt.set_option("display.max_rows", 10, "display.max_columns", 2)
- repr(float_frame)
-
- fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000)
- repr(float_frame)
+ with option_context("display.max_rows", 10, "display.max_columns", 2):
+ repr(float_frame)
- tm.reset_display_options()
+ with option_context("display.max_rows", 1000, "display.max_columns", 1000):
+ repr(float_frame)
def test_repr_unicode(self):
uval = "\u03c3\u03c3\u03c3\u03c3"
diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py
index 75e63cb1f6b54..6d581b5b92e0c 100644
--- a/pandas/tests/io/formats/test_eng_formatting.py
+++ b/pandas/tests/io/formats/test_eng_formatting.py
@@ -1,9 +1,19 @@
import numpy as np
+import pytest
-from pandas import DataFrame
-import pandas._testing as tm
+from pandas import (
+ DataFrame,
+ reset_option,
+ set_eng_float_format,
+)
-import pandas.io.formats.format as fmt
+from pandas.io.formats.format import EngFormatter
+
+
+@pytest.fixture(autouse=True)
+def reset_float_format():
+ yield
+ reset_option("display.float_format")
class TestEngFormatter:
@@ -11,20 +21,19 @@ def test_eng_float_formatter2(self, float_frame):
df = float_frame
df.loc[5] = 0
- fmt.set_eng_float_format()
+ set_eng_float_format()
repr(df)
- fmt.set_eng_float_format(use_eng_prefix=True)
+ set_eng_float_format(use_eng_prefix=True)
repr(df)
- fmt.set_eng_float_format(accuracy=0)
+ set_eng_float_format(accuracy=0)
repr(df)
- tm.reset_display_options()
def test_eng_float_formatter(self):
df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})
- fmt.set_eng_float_format()
+ set_eng_float_format()
result = df.to_string()
expected = (
" A\n"
@@ -35,18 +44,16 @@ def test_eng_float_formatter(self):
)
assert result == expected
- fmt.set_eng_float_format(use_eng_prefix=True)
+ set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
assert result == expected
- fmt.set_eng_float_format(accuracy=0)
+ set_eng_float_format(accuracy=0)
result = df.to_string()
expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
assert result == expected
- tm.reset_display_options()
-
def compare(self, formatter, input, output):
formatted_input = formatter(input)
assert formatted_input == output
@@ -67,7 +74,7 @@ def compare_all(self, formatter, in_out):
self.compare(formatter, -input, "-" + output[1:])
def test_exponents_with_eng_prefix(self):
- formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [
(f * 10**-24, " 1.414y"),
@@ -125,7 +132,7 @@ def test_exponents_with_eng_prefix(self):
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
- formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
+ formatter = EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [
(f * 10**-24, " 3.1416E-24"),
@@ -183,7 +190,7 @@ def test_exponents_without_eng_prefix(self):
self.compare_all(formatter, in_out)
def test_rounding(self):
- formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [
(5.55555, " 5.556"),
(55.5555, " 55.556"),
@@ -194,7 +201,7 @@ def test_rounding(self):
]
self.compare_all(formatter, in_out)
- formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [
(5.55555, " 5.6"),
(55.5555, " 55.6"),
@@ -205,7 +212,7 @@ def test_rounding(self):
]
self.compare_all(formatter, in_out)
- formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [
(5.55555, " 6"),
(55.5555, " 56"),
@@ -216,14 +223,14 @@ def test_rounding(self):
]
self.compare_all(formatter, in_out)
- formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
assert result == " 0.000"
def test_nan(self):
# Issue #11981
- formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.nan)
assert result == "NaN"
@@ -235,14 +242,13 @@ def test_nan(self):
}
)
pt = df.pivot_table(values="a", index="b", columns="c")
- fmt.set_eng_float_format(accuracy=1)
+ set_eng_float_format(accuracy=1)
result = pt.to_string()
assert "NaN" in result
- tm.reset_display_options()
def test_inf(self):
# Issue #11981
- formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.inf)
assert result == "inf"
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 8cfcc26b95b4c..a7648cf1c471a 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -914,7 +914,6 @@ def get_ipython():
repstr = df._repr_html_()
assert "class" in repstr # info fallback
- tm.reset_display_options()
def test_repr_html(self, float_frame):
df = float_frame
@@ -926,16 +925,12 @@ def test_repr_html(self, float_frame):
with option_context("display.notebook_repr_html", False):
df._repr_html_()
- tm.reset_display_options()
-
df = DataFrame([[1, 2], [3, 4]])
with option_context("display.show_dimensions", True):
assert "2 rows" in df._repr_html_()
with option_context("display.show_dimensions", False):
assert "2 rows" not in df._repr_html_()
- tm.reset_display_options()
-
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py
index 3a8dcb339b063..e607b6eb454a1 100644
--- a/pandas/tests/io/formats/test_to_string.py
+++ b/pandas/tests/io/formats/test_to_string.py
@@ -412,7 +412,6 @@ def test_to_string_complex_float_formatting(self):
def test_to_string_format_inf(self):
# GH#24861
- tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
@@ -460,7 +459,6 @@ def test_to_string_int_formatting(self):
assert output == expected
def test_to_string_float_formatting(self):
- tm.reset_display_options()
with option_context(
"display.precision",
5,
@@ -495,7 +493,6 @@ def test_to_string_float_formatting(self):
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
- tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
@@ -516,14 +513,12 @@ def test_to_string_decimal(self):
assert df.to_string(decimal=",") == expected
def test_to_string_left_justify_cols(self):
- tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
- tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56153 | 2023-11-24T18:00:35Z | 2023-11-24T22:57:16Z | 2023-11-24T22:57:16Z | 2023-11-25T01:21:56Z |
BUG: translate losing object dtype with new string dtype | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 543a9864ced26..77ce303dc1bfe 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -25,7 +25,7 @@ Bug fixes
- Bug in :meth:`Index.__getitem__` returning wrong result for Arrow dtypes and negative stepsize (:issue:`55832`)
- Fixed bug in :meth:`DataFrame.__setitem__` casting :class:`Index` with object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
- Fixed bug in :meth:`Index.insert` casting object-dtype to PyArrow backed strings when ``infer_string`` option is set (:issue:`55638`)
--
+- Fixed bug in :meth:`Series.str.translate` losing object dtype when string option is set (:issue:`56152`)
.. ---------------------------------------------------------------------------
.. _whatsnew_214.other:
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 58b904fd31b6a..9fa6e9973291d 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -259,6 +259,7 @@ def _wrap_result(
fill_value=np.nan,
returns_string: bool = True,
returns_bool: bool = False,
+ dtype=None,
):
from pandas import (
Index,
@@ -379,29 +380,29 @@ def cons_row(x):
out = out.get_level_values(0)
return out
else:
- return Index(result, name=name)
+ return Index(result, name=name, dtype=dtype)
else:
index = self._orig.index
# This is a mess.
- dtype: DtypeObj | str | None
+ _dtype: DtypeObj | str | None = dtype
vdtype = getattr(result, "dtype", None)
if self._is_string:
if is_bool_dtype(vdtype):
- dtype = result.dtype
+ _dtype = result.dtype
elif returns_string:
- dtype = self._orig.dtype
+ _dtype = self._orig.dtype
else:
- dtype = vdtype
- else:
- dtype = vdtype
+ _dtype = vdtype
+ elif vdtype is not None:
+ _dtype = vdtype
if expand:
cons = self._orig._constructor_expanddim
- result = cons(result, columns=name, index=index, dtype=dtype)
+ result = cons(result, columns=name, index=index, dtype=_dtype)
else:
# Must be a Series
cons = self._orig._constructor
- result = cons(result, name=name, index=index, dtype=dtype)
+ result = cons(result, name=name, index=index, dtype=_dtype)
result = result.__finalize__(self._orig, method="str")
if name is not None and result.ndim == 1:
# __finalize__ might copy over the original name, but we may
@@ -2317,7 +2318,8 @@ def translate(self, table):
dtype: object
"""
result = self._data.array._str_translate(table)
- return self._wrap_result(result)
+ dtype = object if self._data.dtype == "object" else None
+ return self._wrap_result(result, dtype=dtype)
@forbid_nonstring_types(["bytes"])
def count(self, pat, flags: int = 0):
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index 78f0730d730e8..bd64a5dce3b9a 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -5,6 +5,7 @@
import pytest
from pandas.errors import PerformanceWarning
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -893,7 +894,10 @@ def test_find_nan(any_string_dtype):
# --------------------------------------------------------------------------------------
-def test_translate(index_or_series, any_string_dtype):
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+def test_translate(index_or_series, any_string_dtype, infer_string):
obj = index_or_series(
["abcdefg", "abcc", "cdddfg", "cdefggg"], dtype=any_string_dtype
)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56152 | 2023-11-24T15:48:58Z | 2023-11-26T18:22:57Z | 2023-11-26T18:22:57Z | 2023-11-26T18:24:37Z |
DOC: to_datetime origin argument not unit specific | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 076b506670d40..e42e89b76e6f2 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -843,8 +843,8 @@ def to_datetime(
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible (Timestamp, dt.datetime, np.datetimt64 or date
string), origin is set to Timestamp identified by origin.
- - If a float or integer, origin is the millisecond difference
- relative to 1970-01-01.
+ - If a float or integer, origin is the difference
+ (in units determined by the ``unit`` argument) relative to 1970-01-01.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
| - [ ] closes #55874
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Edited the docstring as suggested in the issue tagged. | https://api.github.com/repos/pandas-dev/pandas/pulls/56151 | 2023-11-24T15:34:55Z | 2023-11-24T22:58:56Z | 2023-11-24T22:58:56Z | 2023-11-24T22:59:05Z |
Adjust tests in resample folder for new string option | diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index fee52780585b8..6ecbdff0018fa 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -5,6 +5,7 @@
from pandas import (
DataFrame,
+ Index,
MultiIndex,
NaT,
PeriodIndex,
@@ -255,7 +256,7 @@ def test_resample_count_empty_dataframe(freq, empty_frame_dti):
index = _asfreq_compat(empty_frame_dti.index, freq)
- expected = DataFrame({"a": []}, dtype="int64", index=index)
+ expected = DataFrame(dtype="int64", index=index, columns=Index(["a"], dtype=object))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 81a054bbbc3df..7e8779ab48b7e 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -197,7 +197,7 @@ def tests_raises_on_nuisance(test_frame):
tm.assert_frame_equal(result, expected)
expected = r[["A", "B", "C"]].mean()
- msg = re.escape("agg function failed [how->mean,dtype->object]")
+ msg = re.escape("agg function failed [how->mean,dtype->")
with pytest.raises(TypeError, match=msg):
r.mean()
result = r.mean(numeric_only=True)
@@ -948,7 +948,7 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
if isinstance(expected_data, str):
if method in ("var", "mean", "median", "prod"):
klass = TypeError
- msg = re.escape(f"agg function failed [how->{method},dtype->object]")
+ msg = re.escape(f"agg function failed [how->{method},dtype->")
else:
klass = ValueError
msg = expected_data
@@ -998,7 +998,7 @@ def test_series_downsample_method(method, numeric_only, expected_data):
with pytest.raises(TypeError, match=msg):
func(**kwargs)
elif method == "prod":
- msg = re.escape("agg function failed [how->prod,dtype->object]")
+ msg = re.escape("agg function failed [how->prod,dtype->")
with pytest.raises(TypeError, match=msg):
func(**kwargs)
else:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56150 | 2023-11-24T14:13:48Z | 2023-11-26T04:30:18Z | 2023-11-26T04:30:18Z | 2023-11-26T11:33:32Z |
DEPR: Some Grouper and Grouping attributes | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0e613760f4927..a77fb18353cd9 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -303,6 +303,8 @@ Other Deprecations
- Deprecated strings ``H``, ``S``, ``U``, and ``N`` denoting units in :func:`to_timedelta` (:issue:`52536`)
- Deprecated strings ``H``, ``T``, ``S``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`52536`)
- Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`)
+- Deprecated the :class:`.BaseGrouper` attributes ``group_keys_seq`` and ``reconstructed_codes``; these will be removed in a future version of pandas (:issue:`56148`)
+- Deprecated the :class:`.Grouping` attributes ``group_index``, ``result_index``, and ``group_arraylike``; these will be removed in a future version of pandas (:issue:`56148`)
- Deprecated the ``errors="ignore"`` option in :func:`to_datetime`, :func:`to_timedelta`, and :func:`to_numeric`; explicitly catch exceptions instead (:issue:`54467`)
- Deprecated the ``fastpath`` keyword in the :class:`Series` constructor (:issue:`20110`)
- Deprecated the ``ordinal`` keyword in :class:`PeriodIndex`, use :meth:`PeriodIndex.from_ordinals` instead (:issue:`55960`)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1fb412e17c4ba..55ab4c2113fd7 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -819,9 +819,9 @@ def value_counts(
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
- codes = self.grouper.reconstructed_codes
+ codes = self.grouper._reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
- levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
+ levels = [ping._group_index for ping in self.grouper.groupings] + [lev]
if dropna:
mask = codes[-1] != -1
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 2d530b64e104b..7d284db4eba2c 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2820,7 +2820,7 @@ def _value_counts(
and not grouping._observed
for grouping in groupings
):
- levels_list = [ping.result_index for ping in groupings]
+ levels_list = [ping._result_index for ping in groupings]
multi_index = MultiIndex.from_product(
levels_list, names=[ping.name for ping in groupings]
)
@@ -5573,7 +5573,7 @@ def _reindex_output(
):
return output
- levels_list = [ping.group_index for ping in groupings]
+ levels_list = [ping._group_index for ping in groupings]
names = self.grouper.names
if qs is not None:
# error: Argument 1 to "append" of "list" has incompatible type
@@ -5795,7 +5795,7 @@ def _idxmax_idxmin(
ping._passed_categorical for ping in self.grouper.groupings
):
expected_len = np.prod(
- [len(ping.group_index) for ping in self.grouper.groupings]
+ [len(ping._group_index) for ping in self.grouper.groupings]
)
if len(self.grouper.groupings) == 1:
result_len = len(self.grouper.groupings[0].grouping_vector.unique())
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index fd0479e17d2bd..fc914831b7a72 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -523,7 +523,6 @@ class Grouping:
"""
_codes: npt.NDArray[np.signedinteger] | None = None
- _group_index: Index | None = None
_all_grouper: Categorical | None
_orig_cats: Index | None
_index: Index
@@ -679,7 +678,7 @@ def _ilevel(self) -> int | None:
@property
def ngroups(self) -> int:
- return len(self.group_index)
+ return len(self._group_index)
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
@@ -695,34 +694,58 @@ def codes(self) -> npt.NDArray[np.signedinteger]:
return self._codes_and_uniques[0]
@cache_readonly
- def group_arraylike(self) -> ArrayLike:
+ def _group_arraylike(self) -> ArrayLike:
"""
Analogous to result_index, but holding an ArrayLike to ensure
we can retain ExtensionDtypes.
"""
if self._all_grouper is not None:
# retain dtype for categories, including unobserved ones
- return self.result_index._values
+ return self._result_index._values
elif self._passed_categorical:
- return self.group_index._values
+ return self._group_index._values
return self._codes_and_uniques[1]
+ @property
+ def group_arraylike(self) -> ArrayLike:
+ """
+ Analogous to result_index, but holding an ArrayLike to ensure
+ we can retain ExtensionDtypes.
+ """
+ warnings.warn(
+ "group_arraylike is deprecated and will be removed in a future "
+ "version of pandas",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._group_arraylike
+
@cache_readonly
- def result_index(self) -> Index:
+ def _result_index(self) -> Index:
# result_index retains dtype for categories, including unobserved ones,
# which group_index does not
if self._all_grouper is not None:
- group_idx = self.group_index
+ group_idx = self._group_index
assert isinstance(group_idx, CategoricalIndex)
cats = self._orig_cats
# set_categories is dynamically added
return group_idx.set_categories(cats) # type: ignore[attr-defined]
- return self.group_index
+ return self._group_index
+
+ @property
+ def result_index(self) -> Index:
+ warnings.warn(
+ "result_index is deprecated and will be removed in a future "
+ "version of pandas",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._result_index
@cache_readonly
- def group_index(self) -> Index:
+ def _group_index(self) -> Index:
codes, uniques = self._codes_and_uniques
if not self._dropna and self._passed_categorical:
assert isinstance(uniques, Categorical)
@@ -744,6 +767,16 @@ def group_index(self) -> Index:
)
return Index._with_infer(uniques, name=self.name)
+ @property
+ def group_index(self) -> Index:
+ warnings.warn(
+ "group_index is deprecated and will be removed in a future "
+ "version of pandas",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._group_index
+
@cache_readonly
def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
uniques: ArrayLike
@@ -809,7 +842,7 @@ def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
- cats = Categorical.from_codes(self.codes, self.group_index, validate=False)
+ cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
return self._index.groupby(cats)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 466bbac641077..f3579e6c13a19 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -15,6 +15,7 @@
Generic,
final,
)
+import warnings
import numpy as np
@@ -32,6 +33,7 @@
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
@@ -616,7 +618,7 @@ def get_iterator(
for each group
"""
splitter = self._get_splitter(data, axis=axis)
- keys = self.group_keys_seq
+ keys = self._group_keys_seq
yield from zip(keys, splitter)
@final
@@ -638,7 +640,7 @@ def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
@final
@cache_readonly
- def group_keys_seq(self):
+ def _group_keys_seq(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
@@ -647,6 +649,16 @@ def group_keys_seq(self):
# provide "flattened" iterator for multi-group setting
return get_flattened_list(ids, ngroups, self.levels, self.codes)
+ @property
+ def group_keys_seq(self):
+ warnings.warn(
+ "group_keys_seq is deprecated and will be removed in a future "
+ "version of pandas",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._group_keys_seq
+
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""dict {group name -> group indices}"""
@@ -654,7 +666,7 @@ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
- keys = [ping.group_index for ping in self.groupings]
+ keys = [ping._group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@final
@@ -691,7 +703,7 @@ def codes(self) -> list[npt.NDArray[np.signedinteger]]:
@property
def levels(self) -> list[Index]:
- return [ping.group_index for ping in self.groupings]
+ return [ping._group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
@@ -766,7 +778,7 @@ def _get_compressed_codes(
# FIXME: compress_group_index's second return value is int64, not intp
ping = self.groupings[0]
- return ping.codes, np.arange(len(ping.group_index), dtype=np.intp)
+ return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)
@final
@cache_readonly
@@ -774,18 +786,28 @@ def ngroups(self) -> int:
return len(self.result_index)
@property
- def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
+ def _reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
codes = self.codes
ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
+ @property
+ def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
+ warnings.warn(
+ "reconstructed_codes is deprecated and will be removed in a future "
+ "version of pandas",
+ category=FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._reconstructed_codes
+
@cache_readonly
def result_index(self) -> Index:
if len(self.groupings) == 1:
- return self.groupings[0].result_index.rename(self.names[0])
+ return self.groupings[0]._result_index.rename(self.names[0])
- codes = self.reconstructed_codes
- levels = [ping.result_index for ping in self.groupings]
+ codes = self._reconstructed_codes
+ levels = [ping._result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@@ -795,12 +817,12 @@ def get_group_levels(self) -> list[ArrayLike]:
# Note: only called from _insert_inaxis_grouper, which
# is only called for BaseGrouper, never for BinGrouper
if len(self.groupings) == 1:
- return [self.groupings[0].group_arraylike]
+ return [self.groupings[0]._group_arraylike]
name_list = []
- for ping, codes in zip(self.groupings, self.reconstructed_codes):
+ for ping, codes in zip(self.groupings, self._reconstructed_codes):
codes = ensure_platform_int(codes)
- levels = ping.group_arraylike.take(codes)
+ levels = ping._group_arraylike.take(codes)
name_list.append(levels)
@@ -907,7 +929,7 @@ def apply_groupwise(
) -> tuple[list, bool]:
mutated = False
splitter = self._get_splitter(data, axis=axis)
- group_keys = self.group_keys_seq
+ group_keys = self._group_keys_seq
result_values = []
# This calls DataSplitter.__iter__
@@ -1087,7 +1109,7 @@ def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
)
@cache_readonly
- def reconstructed_codes(self) -> list[np.ndarray]:
+ def _reconstructed_codes(self) -> list[np.ndarray]:
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c61d9fab0435e..5b17484de9c93 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -3303,3 +3303,13 @@ def test_groupby_ffill_with_duplicated_index():
result = df.groupby(level=0).ffill()
expected = DataFrame({"a": [1, 2, 3, 4, 2, 3]}, index=[0, 1, 2, 0, 1, 2])
tm.assert_frame_equal(result, expected, check_dtype=False)
+
+
+@pytest.mark.parametrize("attr", ["group_keys_seq", "reconstructed_codes"])
+def test_depr_grouper_attrs(attr):
+ # GH#56148
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
+ gb = df.groupby("a")
+ msg = f"{attr} is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ getattr(gb.grouper, attr)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index e3cc41afa4679..3c1a35c984031 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1211,3 +1211,13 @@ def test_grouper_groups():
msg = "Grouper.indexer is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
grper.indexer
+
+
+@pytest.mark.parametrize("attr", ["group_index", "result_index", "group_arraylike"])
+def test_depr_grouping_attrs(attr):
+ # GH#56148
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
+ gb = df.groupby("a")
+ msg = f"{attr} is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ getattr(gb.grouper.groupings[0], attr)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index be02c7f79ba01..aaebb00dd8ad4 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -67,7 +67,9 @@ def groupby_with_truncated_bingrouper(frame_for_truncated_bingrouper):
gb = df.groupby(tdg)
# check we're testing the case we're interested in
- assert len(gb.grouper.result_index) != len(gb.grouper.group_keys_seq)
+ msg = "group_keys_seq is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert len(gb.grouper.result_index) != len(gb.grouper.group_keys_seq)
return gb
| - [x] closes #56148 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56149 | 2023-11-24T13:45:33Z | 2023-11-26T04:35:43Z | 2023-11-26T04:35:43Z | 2023-11-26T13:35:37Z |
BUG raise pdep6 warning for loc full setter | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 6a232365fbfeb..64d297069080f 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -797,6 +797,7 @@ Conversion
- Bug in :meth:`DataFrame.astype` when called with ``str`` on unpickled array - the array might change in-place (:issue:`54654`)
- Bug in :meth:`DataFrame.astype` where ``errors="ignore"`` had no effect for extension types (:issue:`54654`)
- Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`)
+- Bug in ``DataFrame.loc`` was not throwing "incompatible dtype warning" (see `PDEP6 <https://pandas.pydata.org/pdeps/0006-ban-upcasting.html>`_) when assigning a ``Series`` with a different dtype using a full column setter (e.g. ``df.loc[:, 'a'] = incompatible_value``) (:issue:`39584`)
Strings
^^^^^^^
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index a7dd3b486ab11..0f892d4924933 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2143,6 +2143,26 @@ def _setitem_single_column(self, loc: int, value, plane_indexer) -> None:
# If we're setting an entire column and we can't do it inplace,
# then we can use value's dtype (or inferred dtype)
# instead of object
+ dtype = self.obj.dtypes.iloc[loc]
+ if dtype not in (np.void, object) and not self.obj.empty:
+ # - Exclude np.void, as that is a special case for expansion.
+ # We want to warn for
+ # df = pd.DataFrame({'a': [1, 2]})
+ # df.loc[:, 'a'] = .3
+ # but not for
+ # df = pd.DataFrame({'a': [1, 2]})
+ # df.loc[:, 'b'] = .3
+ # - Exclude `object`, as then no upcasting happens.
+ # - Exclude empty initial object with enlargement,
+ # as then there's nothing to be inconsistent with.
+ warnings.warn(
+ f"Setting an item of incompatible dtype is deprecated "
+ "and will raise in a future error of pandas. "
+ f"Value '{value}' has dtype incompatible with {dtype}, "
+ "please explicitly cast to a compatible dtype first.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
self.obj.isetitem(loc, value)
else:
# set value into the column (first attempting to operate inplace, then
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 8a54cb2d7a189..1237c5b86d298 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -499,6 +499,9 @@ def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block:
and is_integer_dtype(self.values.dtype)
and isna(other)
and other is not NaT
+ and not (
+ isinstance(other, (np.datetime64, np.timedelta64)) and np.isnat(other)
+ )
):
warn_on_upcast = False
elif (
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 91cd77741f79b..422436d376f69 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -1103,11 +1103,16 @@ def test_set_value_copy_only_necessary_column(
df_orig = df.copy()
view = df[:]
- if val == "a" and indexer[0] != slice(None):
+ if val == "a" and not warn_copy_on_write:
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype is deprecated"
):
indexer_func(df)[indexer] = val
+ if val == "a" and warn_copy_on_write:
+ with tm.assert_produces_warning(
+ FutureWarning, match="incompatible dtype|Setting a value on a view"
+ ):
+ indexer_func(df)[indexer] = val
else:
with tm.assert_cow_warning(warn_copy_on_write and val == 100):
indexer_func(df)[indexer] = val
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index a1868919be685..a9ee31299d469 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -945,7 +945,8 @@ def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
- df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
@@ -1381,20 +1382,20 @@ def test_loc_expand_empty_frame_keep_midx_names(self):
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
- "val, idxr, warn",
+ "val, idxr",
[
- ("x", "a", None), # TODO: this should warn as well
- ("x", ["a"], None), # TODO: this should warn as well
- (1, "a", None), # TODO: this should warn as well
- (1, ["a"], FutureWarning),
+ ("x", "a"),
+ ("x", ["a"]),
+ (1, "a"),
+ (1, ["a"]),
],
)
- def test_loc_setitem_rhs_frame(self, idxr, val, warn):
+ def test_loc_setitem_rhs_frame(self, idxr, val):
# GH#47578
df = DataFrame({"a": [1, 2]})
with tm.assert_produces_warning(
- warn, match="Setting an item of incompatible dtype"
+ FutureWarning, match="Setting an item of incompatible dtype"
):
df.loc[:, idxr] = DataFrame({"a": [val, 11]}, index=[1, 2])
expected = DataFrame({"a": [np.nan, val]})
@@ -1968,7 +1969,7 @@ def _check_setitem_invalid(self, df, invalid, indexer, warn):
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
- _indexers = [0, [0], slice(0, 1), [True, False, False]]
+ _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
@@ -1982,7 +1983,7 @@ def test_setitem_validation_scalar_bool(self, invalid, indexer):
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
df = DataFrame({"a": [1, 2, 3]}, dtype=any_int_numpy_dtype)
- if isna(invalid) and invalid is not pd.NaT:
+ if isna(invalid) and invalid is not pd.NaT and not np.isnat(invalid):
warn = None
else:
warn = FutureWarning
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 3f13718cfc77a..72cd98ba78122 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -1369,3 +1369,23 @@ def test_frame_setitem_empty_dataframe(self):
index=dti[:0],
)
tm.assert_frame_equal(df, expected)
+
+
+def test_full_setter_loc_incompatible_dtype():
+ # https://github.com/pandas-dev/pandas/issues/55791
+ df = DataFrame({"a": [1, 2]})
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "a"] = True
+ expected = DataFrame({"a": [True, True]})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"a": [1, 2]})
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "a"] = {0: 3.5, 1: 4.5}
+ expected = DataFrame({"a": [3.5, 4.5]})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"a": [1, 2]})
+ df.loc[:, "a"] = {0: 3, 1: 4}
+ expected = DataFrame({"a": [3, 4]})
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index fd4c9d64d656e..565619005d9f0 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -158,11 +158,8 @@ def test_update_with_different_dtype(self, using_copy_on_write):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
- if using_copy_on_write:
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
df.update({"c": Series(["foo"], index=[0])})
- else:
- with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
- df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
{
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 6d52bf161f4fa..c66b6a0f8b99b 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2815,7 +2815,7 @@ def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
)
result = DataFrame({key_val: [1, 2]}, columns=cols)
expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols)
- expected.iloc[:, 1] = expected.iloc[:, 1].astype(object)
+ expected.isetitem(1, expected.iloc[:, 1].astype(object))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index f9c6939654ea1..7b2a9dd99d925 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -534,7 +534,8 @@ def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(self):
# if the assigned values cannot be held by existing integer arrays,
# we cast
- df.iloc[:, 0] = df.iloc[:, 0] + 0.5
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.iloc[:, 0] = df.iloc[:, 0] + 0.5
assert len(df._mgr.blocks) == 2
expected = df.copy()
@@ -1468,6 +1469,7 @@ def test_iloc_setitem_pure_position_based(self):
def test_iloc_nullable_int64_size_1_nan(self):
# GH 31861
result = DataFrame({"a": ["test"], "b": [np.nan]})
- result.loc[:, "b"] = result.loc[:, "b"].astype("Int64")
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ result.loc[:, "b"] = result.loc[:, "b"].astype("Int64")
expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")})
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c897afaeeee0e..ea52ed57c1a1b 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -578,7 +578,8 @@ def test_loc_setitem_consistency(self, frame_for_consistency, val):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = val
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = val
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):
@@ -592,7 +593,8 @@ def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = "foo"
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = "foo"
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):
@@ -605,14 +607,16 @@ def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = 1.0
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_single_row(self):
# GH 15494
# setting on frame with single row
df = DataFrame({"date": Series([Timestamp("20180101")])})
- df.loc[:, "date"] = "string"
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = "string"
expected = DataFrame({"date": Series(["string"])})
tm.assert_frame_equal(df, expected)
@@ -672,9 +676,10 @@ def test_loc_setitem_consistency_slice_column_len(self):
# timedelta64[m] -> float, so this cannot be done inplace, so
# no warning
- df.loc[:, ("Respondent", "Duration")] = df.loc[
- :, ("Respondent", "Duration")
- ] / Timedelta(60_000_000_000)
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, ("Respondent", "Duration")] = df.loc[
+ :, ("Respondent", "Duration")
+ ] / Timedelta(60_000_000_000)
expected = Series(
[23.0, 12.0, 14.0, 36.0], index=df.index, name=("Respondent", "Duration")
@@ -1481,7 +1486,11 @@ def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
# if result started off with object dtype, then the .loc.__setitem__
# below would retain object dtype
result = DataFrame(index=idx, columns=["var"], dtype=np.float64)
- result.loc[:, idxer] = expected
+ with tm.assert_produces_warning(
+ FutureWarning if idxer == "var" else None, match="incompatible dtype"
+ ):
+ # See https://github.com/pandas-dev/pandas/issues/56223
+ result.loc[:, idxer] = expected
tm.assert_frame_equal(result, expected)
def test_loc_setitem_time_key(self):
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7254fd7cb345d..1527f2219d7b6 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -168,7 +168,7 @@ def test_frame_non_unique_columns(self, orient, data):
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
- expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
+ expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000)
elif orient == "split":
expected = df
expected.columns = ["x", "x.1"]
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 72e6457e65e3c..1dcecc3d9b09d 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2964,9 +2964,9 @@ def test_merge_empty_frames_column_order(left_empty, right_empty):
if left_empty and right_empty:
expected = expected.iloc[:0]
elif left_empty:
- expected.loc[:, "B"] = np.nan
+ expected["B"] = np.nan
elif right_empty:
- expected.loc[:, ["C", "D"]] = np.nan
+ expected[["C", "D"]] = np.nan
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index c52e47a812183..f4992b758af74 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -491,7 +491,7 @@ def _check_setitem_invalid(self, ser, invalid, indexer, warn):
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
- _indexers = [0, [0], slice(0, 1), [True, False, False]]
+ _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
@@ -505,7 +505,7 @@ def test_setitem_validation_scalar_bool(self, invalid, indexer):
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
ser = Series([1, 2, 3], dtype=any_int_numpy_dtype)
- if isna(invalid) and invalid is not NaT:
+ if isna(invalid) and invalid is not NaT and not np.isnat(invalid):
warn = None
else:
warn = FutureWarning
| - [x] addresses the inconsistency brought up in https://github.com/pandas-dev/pandas/issues/39584#issuecomment-1748891309
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56146 | 2023-11-24T11:56:17Z | 2024-01-09T22:30:06Z | 2024-01-09T22:30:06Z | 2024-01-10T12:47:44Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.